/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
/*
 * online_page_callback contains a pointer to the current page-onlining
 * function. Initially it is generic_online_page(). If necessary, it can
 * be changed by calling set_online_page_callback() to register a
 * callback, and restore_online_page_callback() to restore the generic
 * one.
 */

static void generic_online_page(struct page *page, unsigned int order);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}
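
/*
 * Locking sketch (illustrative): readers that must keep memory from being
 * hot-[un]plugged while they walk it bracket the walk with the reader
 * side of mem_hotplug_lock:
 *
 *	get_online_mems();
 *	... inspect zones / struct pages ...
 *	put_online_mems();
 *
 * The hotplug paths themselves use mem_hotplug_begin()/mem_hotplug_done(),
 * which take the lock for writing and additionally pin CPU hotplug via
 * cpus_read_lock(), so memory and CPU hotplug cannot interleave.
 */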

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	char *resource_name = "System RAM";

	if (start + size > max_mem_size)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
			 start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}
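
/*
 * Note on the refcounting above (explanatory): bootmem-allocated pages
 * carry a base reference of 1. get_page_bootmem() stamps the info and a
 * type into the page and takes an extra reference per registered use;
 * put_page_bootmem() drops one, and once the count falls back to the
 * base value the metadata is cleared and the page is handed back to the
 * page allocator via free_reserved_page().
 */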

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = ms->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = ms->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
				   struct vmem_altmap *altmap, bool want_memblock)
{
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(nid, phys_start_pfn, altmap);
	if (ret < 0)
		return ret;

	if (!want_memblock)
		return 0;

	return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
		      unsigned long nr_pages, struct mhp_restrictions *restrictions)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	struct vmem_altmap *altmap = restrictions->altmap;

	/* during initialization of mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, section_nr_to_pfn(i), altmap,
				    restrictions->flags & MHP_MEMBLOCK_API);

		/*
		 * EEXIST is finally dealt with by ioresource collision
		 * check. see add_memory() => register_memory_resource()
		 * Warning will be printed if there is collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
		cond_resched();
	}
	vmemmap_populate_print_last();
out:
	return err;
}
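
/*
 * Worked example for __add_pages() (illustrative; numbers assume x86-64
 * defaults of 4 KiB pages and 128 MiB sections, i.e. PAGES_PER_SECTION
 * == 32768): hot-adding 256 MiB (65536 pages) at a section-aligned
 * phys_start_pfn gives
 *
 *	start_sec = pfn_to_section_nr(phys_start_pfn);
 *	end_sec   = pfn_to_section_nr(phys_start_pfn + 65536 - 1);
 *
 * so end_sec == start_sec + 1 and the loop above calls __add_section()
 * exactly twice, once per 128 MiB section.
 */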

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, it
		 * needs to shrink zone->zone_start_pfn and
		 * zone->spanned_pages. In this case, we find the second
		 * smallest valid mem_section for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, it
		 * needs to shrink zone->spanned_pages. In this case, we
		 * find the second biggest valid mem_section for shrinking
		 * the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest
	 * mem_section in the zone, it only creates a hole in the zone, so
	 * we need not change the zone's span. But the zone may now consist
	 * of nothing but holes, so check whether any valid section is left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, it
		 * needs to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages. In this case, we find the
		 * second smallest valid mem_section for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, it
		 * needs to shrink pgdat->node_spanned_pages. In this case,
		 * we find the second biggest valid mem_section for
		 * shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest
	 * mem_section in the pgdat, it only creates a hole in the pgdat,
	 * so we need not change the pgdat's span. But the pgdat may now
	 * consist of nothing but holes, so check whether any valid
	 * section is left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static void __remove_section(struct zone *zone, struct mem_section *ms,
			     unsigned long map_offset,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn;
	int scn_nr;

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	unregister_memory_section(ms);

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms, map_offset, altmap);
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. The caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		    unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long i;
	unsigned long map_offset = 0;
	int sections_to_remove;

	/* In the ZONE_DEVICE case device driver owns the memory region */
	if (is_dev_zone(zone)) {
		if (altmap)
			map_offset = vmem_altmap_offset(altmap);
	}

	clear_zone_contiguous(zone);

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

		cond_resched();
		__remove_section(zone, __pfn_to_section(pfn), map_offset,
				 altmap);
		map_offset = 0;
	}

	set_zone_contiguous(zone);
}
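
/*
 * Call-chain note (sketch, not a definitive map): __remove_pages() is
 * not called by drivers directly; on most architectures the hot-remove
 * path reaches it roughly as remove_memory() -> arch_remove_memory() ->
 * __remove_pages(), after offline_pages() has already taken the range
 * out of use.
 */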
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page, unsigned int order)
{
	kernel_map_pages(page, 1 << order, 1);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}

static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
{
	unsigned long end = start + nr_pages;
	int order, onlined_pages = 0;

	while (start < end) {
		order = min(MAX_ORDER - 1,
			get_order(PFN_PHYS(end) - PFN_PHYS(start)));
		(*online_page_callback)(pfn_to_page(start), order);

		onlined_pages += (1UL << order);
		start += (1UL << order);
	}
	return onlined_pages;
}
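
/*
 * Worked example for online_pages_blocks() (illustrative; assumes x86-64
 * defaults of 4 KiB pages and MAX_ORDER == 11): onlining one 128 MiB
 * memory block means nr_pages == 32768, so each loop iteration picks
 * order = min(10, get_order(remaining bytes)) == 10 and hands a 4 MiB
 * chunk to the callback; the block is onlined in 32 such chunks.
 */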

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long onlined_pages = *(unsigned long *)arg;

	if (PageReserved(pfn_to_page(start_pfn)))
		onlined_pages += online_pages_blocks(start_pfn, nr_pages);

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
			 MEMMAP_HOTPLUG, altmap);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}
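
/*
 * Decision summary for default_zone_for_pfn() (illustrative):
 *
 *	range intersects a kernel zone only  -> that kernel zone
 *	range intersects ZONE_MOVABLE only   -> ZONE_MOVABLE
 *	neither, or both (overlapping zones) -> ZONE_MOVABLE if
 *	                                        movable_node is enabled,
 *	                                        else the kernel zone
 */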

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
	return zone;
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;
	struct memory_block *mem;

	mem_hotplug_begin();

	/*
	 * We can't use pfn_to_nid() because nid might be stored in struct page
	 * which is not yet initialized. Instead, we find nid from memory block.
	 */
	mem = find_memory_block(__pfn_to_section(pfn));
	nid = mem->nid;
	put_device(&mem->dev);

	/* associate pfn range with the zone */
	zone = move_pfn_range(online_type, nid, pfn, nr_pages);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL);
		else
			zone_pcp_update(zone);
	}

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_core_hotplug(nid);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing an uninitialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}

/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 * @start: start addr of the node
 * @set_node_online: whether we want to online the node
 *
 * Called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, u64 start, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid, start);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, 0, true);
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	unsigned long block_sz = memory_block_size_bytes();
	u64 block_nr_pages = block_sz >> PAGE_SHIFT;
	u64 nr_pages = size >> PAGE_SHIFT;
	u64 start_pfn = PFN_DOWN(start);

	/* memory range must be block size aligned */
	if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
	    !IS_ALIGNED(nr_pages, block_nr_pages)) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       block_sz, start, size);
		return -EINVAL;
	}

	return 0;
}
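
/*
 * Example (illustrative; memory_block_size_bytes() is commonly 128 MiB
 * on x86-64): with a 128 MiB block size, hot-adding 64 MiB at
 * 0x140000000 fails the check above with -EINVAL because the size is
 * not a multiple of the block size, while adding 128 MiB at the same
 * (block-aligned) address passes.
 */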

static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}

/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res)
{
	struct mhp_restrictions restrictions = {
		.flags = MHP_MEMBLOCK_API,
	};
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	mem_hotplug_begin();

	/*
	 * Add new range to memblock so that when hotadd_new_pgdat() is called
	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
	 * this new range and calculate total pages correctly. The range will
	 * be removed at hot-remove time.
	 */
	memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, start, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &restrictions);
	if (ret < 0)
		goto error;

	if (new_node) {
		/* If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no rollback path
		 * from this point, so catch the failure with BUG_ON(). We
		 * online the node here and cannot roll back.
		 */
		node_set_online(nid);
		ret = __register_one_node(nid);
		BUG_ON(ret);
	}

	/* link memory sections under this node.*/
	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
	BUG_ON(ret);

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/* online pages if requested */
	if (memhp_auto_online)
		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
				  NULL, online_memory_block);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_node)
		rollback_node_hotadd(nid);
	memblock_remove(start, size);
	mem_hotplug_done();
	return ret;
}

/* requires device_hotplug_lock, see add_memory_resource() */
int __ref __add_memory(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}

int add_memory(int nid, u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();
	rc = __add_memory(nid, start, size);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(add_memory);
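
/*
 * Usage sketch (illustrative): a ballooning or ACPI-style driver that has
 * discovered a new, block-aligned range of RAM would hand it to the core
 * with something like
 *
 *	ret = add_memory(nid, start, size);
 *
 * where start/size satisfy check_hotplug_memory_range(). add_memory()
 * takes the device hotplug lock itself; callers that already hold it
 * must use __add_memory() instead.
 */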

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the pfn of the start of the next active pageblock after a given pfn */
static unsigned long next_active_pageblock(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(pfn & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return pfn + (1 << order);
	}

	return pfn + pageblock_nr_pages;
}
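
/*
 * Note (explanatory): the page_order() read above is done without the
 * zone lock, so the buddy can split or merge underneath us and the value
 * may be stale. That is why the result is range-checked against
 * [pageblock_order, MAX_ORDER) before it is trusted; a bogus order only
 * costs an imprecise step to the next pageblock, which the caller's
 * bounds check tolerates.
 */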

static bool is_pageblock_removable_nolock(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	struct zone *zone;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware so we might end up outside of
	 * the zone but still within the section.
	 * We have to take care about the node as well. If the node is offline
	 * its NODE_DATA will be NULL - see page_zone.
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long end_pfn, pfn;

	end_pfn = min(start_pfn + nr_pages,
			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));

	/* Check the starting page of each pageblock within the range */
	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
		if (!is_pageblock_removable_nolock(pfn))
			return false;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return true;
}

/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * When true, return its valid [start, end).
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned long *valid_start, unsigned long *valid_end)
{
	unsigned long pfn, sec_end_pfn;
	unsigned long start, end;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
			while ((i < MAX_ORDER_NR_PAGES) &&
			       !pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			/* Check if we got outside of the zone */
			if (zone && !zone_spans_pfn(zone, pfn + i))
				return 0;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return 0;
			if (!zone)
				start = pfn + i;
			zone = page_zone(page);
			end = pfn + MAX_ORDER_NR_PAGES;
		}
	}

	if (zone) {
		*valid_start = start;
		*valid_end = min(end, end_pfn);
		return 1;
	} else {
		return 0;
	}
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). We scan pfn because it's much
 * easier than scanning over linked list. This function returns the pfn
 * of the first found movable page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		struct page *page, *head;
		unsigned long skip;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageLRU(page))
			return pfn;
		if (__PageMovable(page))
			return pfn;

		if (!PageHuge(page))
			continue;
		head = compound_head(page);
		if (page_huge_active(head))
			return pfn;
		skip = (1 << compound_order(head)) - (page - head);
		pfn += skip - 1;
	}
	return 0;
}

static struct page *new_node_page(struct page *page, unsigned long private)
{
	int nid = page_to_nid(page);
	nodemask_t nmask = node_states[N_MEMORY];

	/*
	 * try to allocate from a different node but reuse this node if there
	 * are no other online nodes to be used (e.g. we are offlining a part
	 * of the only existing node)
	 */
	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);
}
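
/*
 * Example (illustrative): when offlining part of node 1 on a two-node
 * machine, new_node_page() starts from node_states[N_MEMORY] = {0,1},
 * clears node 1, and therefore migrates the displaced pages to node 0.
 * On a single-node machine the mask would become empty after the clear,
 * so the node is put back and the pages are migrated to other memory
 * within the node being partially offlined.
 */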

static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			isolate_huge_page(head, &source);
			continue;
		} else if (PageTransHuge(page))
			pfn = page_to_pfn(compound_head(page))
				+ hpage_nr_pages(page) - 1;

		/*
		 * HWPoison pages have elevated reference counts so the
		 * migration would fail on them. It also doesn't make any
		 * sense to migrate them in the first place. Still try to
		 * unmap such a page in case it is still mapped (e.g. the
		 * current hwpoison implementation doesn't unmap KSM pages
		 * but keeps the unmap as the catch-all safety net).
		 */
		if (PageHWPoison(page)) {
			if (WARN_ON(PageLRU(page)))
				isolate_lru_page(page);
			if (page_mapped(page))
				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
			continue;
		}

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal with pages on
		 * LRU and non-lru movable pages.
		 */
		if (PageLRU(page))
			ret = isolate_lru_page(page);
		else
			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			if (!__PageMovable(page))
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_cache(page));

		} else {
			pr_warn("failed to isolate pfn %lx\n", pfn);
			dump_page(page, "isolation failed");
		}
		put_page(page);
	}
	if (!list_empty(&source)) {
		/* Allocate a new page from the nearest neighbor node */
		ret = migrate_pages(&source, new_node_page, NULL, 0,
				    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret) {
			list_for_each_entry(page, &source, lru) {
				pr_warn("migrating pfn %lx failed ret:%d ",
					page_to_pfn(page), ret);
				dump_page(page, "migration failure");
			}
			putback_movable_pages(&source);
		}
	}

	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			  void *data)
{
	unsigned long *offlined_pages = (unsigned long *)data;

	*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
	return 0;
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
}

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	movable_node_enabled = true;
#else
	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offline is within the range
	 * [0..ZONE_NORMAL], and it is the last present memory there,
	 * the zones in that range will become empty after the offlining,
	 * thus we can determine that we need to clear the node from
	 * node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);

#ifdef CONFIG_HIGHMEM
	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which
	 * have normal memory or high memory.
	 * Here we add the present_pages belonging to ZONE_HIGHMEM.
	 * If the zone is within the range of [0..ZONE_HIGHMEM), and
	 * we determine that the zones in that range become empty,
	 * we need to clear the node for N_HIGH_MEMORY.
	 */
	present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
#endif

	/*
	 * We have accounted the pages from [0..ZONE_NORMAL), and
	 * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM
	 * as well.
	 * Here we count the possible pages from ZONE_MOVABLE.
	 * If after having accounted all the pages, we see that the nr_pages
	 * to be offlined is over or equal to the accounted pages,
	 * we know that the node will become empty, and so, we can clear
	 * it for N_MEMORY as well.
	 */
	present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;

	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_clear_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_clear_state(node, N_MEMORY);
}

static int __ref __offline_pages(unsigned long start_pfn,
		unsigned long end_pfn)
{
	unsigned long pfn, nr_pages;
	unsigned long offlined_pages = 0;
	int ret, node, nr_isolate_pageblock;
	unsigned long flags;
	unsigned long valid_start, valid_end;
	struct zone *zone;
	struct memory_notify arg;
	char *reason;

	mem_hotplug_begin();

	/*
	 * Offlining is only attempted on ranges that sit within a single
	 * zone; we assume this for now, which keeps hotplug much easier
	 * (and more readable).
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
				  &valid_end)) {
		ret = -EINVAL;
		reason = "multizone range";
		goto failed_removal;
	}

	zone = page_zone(pfn_to_page(valid_start));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE,
				       SKIP_HWPOISON | REPORT_FAILURE);
	if (ret < 0) {
		reason = "failure to isolate range";
		goto failed_removal;
	}
	nr_isolate_pageblock = ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		reason = "notifier failure";
		goto failed_removal_isolated;
	}

	do {
		for (pfn = start_pfn; pfn;) {
			if (signal_pending(current)) {
				ret = -EINTR;
				reason = "signal backoff";
				goto failed_removal_isolated;
			}

			cond_resched();
			lru_add_drain_all();

			pfn = scan_movable_pages(pfn, end_pfn);
			if (pfn) {
				/*
				 * TODO: fatal migration failures should bail
				 * out
				 */
				do_migrate_range(pfn, end_pfn);
			}
		}

		/*
		 * Dissolve free hugepages in the memory block before doing
		 * offlining actually in order to make hugetlbfs's object
		 * counting consistent.
		 */
		ret = dissolve_free_huge_pages(start_pfn, end_pfn);
		if (ret) {
			reason = "failure to dissolve huge pages";
			goto failed_removal_isolated;
		}
		/* check again */
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
					    NULL, check_pages_isolated_cb);
	} while (ret);

	/*
	 * OK, the whole target range is isolated; we cannot roll back
	 * from this point on.
	 */
	walk_system_ram_range(start_pfn, end_pfn - start_pfn,
			      &offlined_pages, offline_isolated_pages_cb);
	pr_info("Offlined Pages %ld\n", offlined_pages);
	/*
	 * Onlining will reset the pagetype flags and make the migratetype
	 * MOVABLE, so we only need to decrease the zone's isolated
	 * pageblock counter here.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock -= nr_isolate_pageblock;
	spin_unlock_irqrestore(&zone->lock, flags);

	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
	zone->present_pages -= offlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
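	/*
	 * If the node lost all of its memory (status_change_nid >= 0),
	 * its reclaim and compaction daemons have nothing left to work
	 * on, so stop them.
	 */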
	if (arg.status_change_nid >= 0) {
		kswapd_stop(node);
		kcompactd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_removal_isolated:
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
		 reason);
	/* pushback to free area */
	mem_hotplug_done();
	return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages);
}
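
/*
 * Illustrative usage (not part of the original source): offline one
 * section-aligned 128MiB memory block, with the device hotplug lock held
 * as the sysfs memory-block code does. start_addr is a hypothetical,
 * suitably aligned physical address.
 *
 *	lock_device_hotplug();
 *	rc = offline_pages(PFN_DOWN(start_addr), SZ_128M >> PAGE_SHIFT);
 *	unlock_device_hotplug();
 */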
#endif /* CONFIG_MEMORY_HOTREMOVE */

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in the range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}
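
/*
 * Example (illustrative): the hot-remove path below uses this walker to
 * verify that every memory block in a range is already offline:
 *
 *	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
 *				NULL, check_memblock_offlined_cb);
 *
 * A callback returning non-zero aborts the walk, and that value is
 * propagated back to the caller.
 */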

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);
	}

	return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * A CPU on this node has not been removed, so we
			 * cannot offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

/**
 * try_offline_node - offline the node if it is no longer used
 * @nid: the node ID
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * Some memory sections of this node have not been
		 * removed, so we cannot offline the node now.
		 */
		return;
	}

	if (check_cpu_on_node(pgdat))
		return;

	/*
	 * All memory and CPUs of this node have been removed; we can
	 * offline the node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
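
/*
 * Illustrative usage (not part of the original source): an eject handler
 * that has just removed the last memory block of a node might do:
 *
 *	lock_device_hotplug();
 *	try_offline_node(nid);
 *	unlock_device_hotplug();
 *
 * The function is a no-op if any present section or CPU still belongs
 * to the node.
 */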

static void __release_memory_resource(resource_size_t start,
				      resource_size_t size)
{
	int ret;

	/*
	 * When removing memory in the same granularity as it was added,
	 * this function never fails. It might only fail if resources
	 * have to be adjusted or split. We'll ignore the error, as
	 * removal of memory cannot fail.
	 */
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret) {
		resource_size_t endres = start + size - 1;

		pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
			&start, &endres, ret);
	}
}

/**
 * __remove_memory - remove previously hot-added memory
 * @nid: the node ID
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref __remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and trigger a BUG()
	 * if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				check_memblock_offlined_cb);
	if (ret)
		BUG();

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");
	memblock_free(start, size);
	memblock_remove(start, size);

	arch_remove_memory(nid, start, size, NULL);
	__release_memory_resource(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
}

void remove_memory(int nid, u64 start, u64 size)
{
	lock_device_hotplug();
	__remove_memory(nid, start, size);
	unlock_device_hotplug();
}
EXPORT_SYMBOL_GPL(remove_memory);
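
/*
 * Illustrative usage (not part of the original source): tear down a
 * previously hot-added 128MiB region on node 0. All memory blocks in the
 * range must already be offline, or __remove_memory() will BUG().
 * start_addr is a hypothetical, block-aligned physical address.
 *
 *	remove_memory(0, start_addr, SZ_128M);
 */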
#endif /* CONFIG_MEMORY_HOTREMOVE */