// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

/*
 * memory_hotplug.memmap_on_memory parameter
 */
static bool memmap_on_memory __ro_after_init;
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
#endif
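
/*
 * Example (a usage note, not from the original file): booting with
 * "memory_hotplug.memmap_on_memory=1" on the kernel command line requests
 * that the memmap of hotplugged memory be allocated from the hotplugged
 * range itself. The parameter is read-only at runtime (mode 0444); whether
 * a given block can actually honor the request is decided per block by
 * mhp_supports_memmap_on_memory() below.
 */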

/*
 * online_page_callback contains a pointer to the current page-onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure the value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to the documentation of 'mem=' in kernel-parameters.txt for
	 * more details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes, we check that this pfn does not already
		 * reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
			  const char *reason)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn, min_align)
			|| !IS_ALIGNED(nr_pages, min_align)) {
		WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
		     reason, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}
	return 0;
}

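/*
 * Example of the granularity enforced by check_pfn_span() (an illustrative
 * note, assuming common defaults): with CONFIG_SPARSEMEM_VMEMMAP and 4 KiB
 * base pages, PAGES_PER_SUBSECTION is 512, so pfn and nr_pages must be
 * multiples of 512 (2 MiB); without VMEMMAP the granularity grows to a
 * full section (PAGES_PER_SECTION, i.e. 32768 pages / 128 MiB with x86-64
 * defaults).
 */
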
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on a fully initialized page->flags (and others)
 * should use this rather than pfn_valid && pfn_to_page.
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);

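/*
 * Usage sketch for pfn walkers (illustrative, not from the original file):
 * instead of the racy
 *	if (pfn_valid(pfn))
 *		page = pfn_to_page(pfn);
 * code that inspects page state should do
 *	page = pfn_to_online_page(pfn);
 *	if (!page)
 *		continue;
 * so that offline ranges, holes and ZONE_DEVICE collisions are skipped.
 */
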
/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!params->pgprot.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	err = check_pfn_span(pfn, nr_pages, "add");
	if (err)
		return err;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, it needs
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, it needs
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
	zone_span_writeunlock(zone);
}

static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void __ref remove_pfn_range_from_zone(struct zone *zone,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages, flags;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_idx(zone) == ZONE_DEVICE)
		return;
#endif

	clear_zone_contiguous(zone);

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	set_zone_contiguous(zone);
}

static void __remove_section(unsigned long pfn, unsigned long nr_pages,
			     unsigned long map_offset,
			     struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	unsigned long map_offset = 0;

	map_offset = vmem_altmap_offset(altmap);

	if (check_pfn_span(pfn, nr_pages, "remove"))
		return;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		__remove_section(pfn, cur_nr_pages, map_offset, altmap);
		map_offset = 0;
	}
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void generic_online_page(struct page *page, unsigned int order)
{
	/*
	 * Freeing the page with debug_pagealloc enabled will try to unmap it,
	 * so we should map it first. This is better than introducing a special
	 * case in the page freeing fast path.
	 */
	debug_pagealloc_map_pages(page, 1 << order);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}
EXPORT_SYMBOL_GPL(generic_online_page);

static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be
	 * MAX_ORDER_NR_PAGES aligned, but is pageblock aligned. __ffs() will
	 * detect this and the first chunk to online will be
	 * pageblock_nr_pages.
	 */
	for (pfn = start_pfn; pfn < end_pfn;) {
		int order = min(MAX_ORDER - 1UL, __ffs(pfn));

		(*online_page_callback)(pfn_to_page(pfn), order);
		pfn += (1UL << order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);
}

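/*
 * Worked example for the __ffs() chunking in online_pages_range()
 * (illustrative, assuming the common MAX_ORDER == 11 configuration): for a
 * range starting at pfn 0x10200 (512-page aligned, not section aligned),
 * the first chunk gets order min(MAX_ORDER - 1, __ffs(0x10200)) =
 * min(10, 9) = 9, i.e. 512 pages; the next pfn (0x10400) then allows
 * order-10 chunks.
 */
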
/* check which state of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

static void section_taint_zone_device(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}

/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				  unsigned long nr_pages,
				  struct vmem_altmap *altmap, int migratetype)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * Subsection population requires care in pfn_to_online_page().
	 * Set the taint to enable the slow path detection of
	 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
	 * section.
	 */
	if (zone_is_zone_device(zone)) {
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
	}

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone to span the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them, so we should be safe.
	 */
	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			 MEMINIT_HOTPLUG, altmap, migratetype);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in the simple case where the zones do
	 * not overlap in the given range.
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone, or two zones overlap in the
	 * given range, then we use the movable zone only if movable_node is
	 * enabled, because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

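/*
 * Example (a usage note, not from the original file): writing
 * "online_kernel", "online_movable" or "online" to a memory block's sysfs
 * "state" attribute maps to MMOP_ONLINE_KERNEL, MMOP_ONLINE_MOVABLE or
 * MMOP_ONLINE respectively, which zone_for_pfn_range() above turns into a
 * kernel zone, ZONE_MOVABLE, or the default picked by
 * default_zone_for_pfn().
 */
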
/*
 * This function should only be called by memory_block_{online,offline},
 * and {online,offline}_pages.
 */
void adjust_present_page_count(struct zone *zone, long nr_pages)
{
	unsigned long flags;

	zone->present_pages += nr_pages;
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += nr_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
			      struct zone *zone)
{
	unsigned long end_pfn = pfn + nr_pages;
	int ret;

	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
	if (ret)
		return ret;

	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections online here as otherwise they will be
	 * left offline.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	return ret;
}

void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = pfn + nr_pages;

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections offline here as otherwise they will be
	 * left online.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	/*
	 * The pages associated with this vmemmap have been offlined, so
	 * we can reset its state here.
	 */
	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
{
	unsigned long flags;
	int need_zonelists_rebuild = 0;
	const int nid = zone_to_nid(zone);
	int ret;
	struct memory_notify arg;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely, to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves the initial
	 * part of the physical memory space for vmemmaps. That space is
	 * pageblock aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages ||
			 !IS_ALIGNED(pfn, pageblock_nr_pages) ||
			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	mem_hotplug_begin();

	/* associate pfn range with the zone */
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * Fixup the number of isolated pageblocks before marking the sections
	 * as onlining, such that undo_isolate_page_range() works correctly.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	online_pages_range(pfn, nr_pages);
	adjust_present_page_count(zone, nr_pages);

	node_states_set_node(nid, &arg);
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL);
	zone_pcp_update(zone);

	/* Basic onlining is complete, allow allocation of onlined pages. */
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);

	/*
	 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
	 * the tail of the freelist when undoing isolation). Shuffle the whole
	 * zone to make sure the just onlined pages are properly distributed
	 * across the whole freelist - to create an initial shuffle.
	 */
	shuffle_zone(zone);

	init_per_zone_wmark_min();

	kswapd_run(nid);
	kcompactd_run(nid);

	writeback_set_ratelimit();

	memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	remove_pfn_range_from_zone(zone, pfn, nr_pages);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid)
{
	struct pglist_data *pgdat;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
		arch_refresh_nodedata(nid, pgdat);
	} else {
		int cpu;
		/*
		 * Reset the nr_zones, order and highest_zoneidx before reuse.
		 * Note that kswapd will init kswapd_highest_zoneidx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_highest_zoneidx = 0;
		for_each_online_cpu(cpu) {
			struct per_cpu_nodestat *p;

			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
			memset(p, 0, sizeof(*p));
		}
	}

	/* we can use NODE_DATA(nid) from here */
	pgdat->node_id = nid;
	pgdat->node_start_pfn = 0;

	/* init node's zones as empty zones, we don't have any present pages. */
	free_area_init_core_hotplug(nid);

	/*
	 * The node we allocated has no zone fallback lists. To avoid accessing
	 * an uninitialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in the offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}

/**
 * __try_online_node - online a node if offlined
 * @nid: the node ID
 * @set_node_online: Whether we want to online the node
 *
 * Called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node.
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, true);
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	/* memory range must be block size aligned */
	if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes())) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       memory_block_size_bytes(), start, size);
		return -EINVAL;
	}

	return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
	mem->online_type = mhp_default_online_type;
	return device_online(&mem->dev);
}

bool mhp_supports_memmap_on_memory(unsigned long size)
{
	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
	unsigned long remaining_size = size - vmemmap_size;

	/*
	 * Besides having arch support and the feature enabled at runtime, we
	 * need a few more assumptions to hold true:
	 *
	 * a) We span a single memory block: memory onlining/offlining happens
	 *    in memory block granularity. We don't want the vmemmap of online
	 *    memory blocks to reside on offline memory blocks. In the future,
	 *    we might want to support variable-sized memory blocks to make the
	 *    feature more versatile.
	 *
	 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
	 *    to populate memory from the altmap for unrelated parts (i.e.,
	 *    other memory blocks).
	 *
	 * c) The vmemmap pages (and thereby the pages that will be exposed to
	 *    the buddy) have to cover full pageblocks: memory onlining/offlining
	 *    code requires applicable ranges to be pageblock-aligned, for
	 *    example, to set the migratetypes properly.
	 *
	 * TODO: Although we have a check here to make sure that vmemmap pages
	 *	 fully populate a PMD, it is not the right place to check for
	 *	 this. A much better solution involves improving vmemmap code
	 *	 to fallback to base pages when trying to populate vmemmap using
	 *	 altmap as an alternative source of memory, and we do not exactly
	 *	 populate a single PMD.
	 */
	return memmap_on_memory &&
	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
	       size == memory_block_size_bytes() &&
	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
}

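/*
 * Worked example for the checks above (illustrative, assuming x86-64
 * defaults): a 128 MiB memory block spans 32768 pages; with a 64-byte
 * struct page the vmemmap needs 32768 * 64 bytes = 2 MiB, exactly one PMD,
 * and the remaining 126 MiB is a whole number of 2 MiB pageblocks - so
 * such a block passes mhp_supports_memmap_on_memory().
 */
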
/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
	struct vmem_altmap mhp_altmap = {};
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	if (!node_possible(nid)) {
		WARN(1, "node %d was absent from the node_possible_map\n", nid);
		return -EINVAL;
	}

	mem_hotplug_begin();

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/*
	 * Self hosted memmap array
	 */
	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
		if (!mhp_supports_memmap_on_memory(size)) {
			ret = -EINVAL;
			goto error;
		}
		mhp_altmap.free = PHYS_PFN(size);
		mhp_altmap.base_pfn = PHYS_PFN(start);
		params.altmap = &mhp_altmap;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &params);
	if (ret < 0)
		goto error;

	/* create memory block devices after memory was added */
	ret = create_memory_block_devices(start, size, mhp_altmap.alloc);
	if (ret) {
		arch_remove_memory(nid, start, size, NULL);
		goto error;
	}

	if (new_node) {
		/*
		 * If the sysfs file of the new node can't be created, CPUs on
		 * the node can't be hot-added. There is no way to roll back
		 * now, so catch it with BUG_ON(), reluctantly.
		 * We online the node here and can't roll back from here on.
		 */
		node_set_online(nid);
		ret = __register_one_node(nid);
		BUG_ON(ret);
	}

	/* link memory sections under this node */
	link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
			  MEMINIT_HOTPLUG);

	/* create new memmap entry */
	if (!strcmp(res->name, "System RAM"))
		firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/*
	 * In case we're allowed to merge the resource, flag it and trigger
	 * merging now that adding succeeded.
	 */
	if (mhp_flags & MHP_MERGE_RESOURCE)
		merge_system_ram_resource(res);

	/* online pages if requested */
	if (mhp_default_online_type != MMOP_OFFLINE)
		walk_memory_blocks(start, size, NULL, online_memory_block);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_node)
		rollback_node_hotadd(nid);
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);
	mem_hotplug_done();
	return ret;
}

/* requires device_hotplug_lock, see add_memory_resource() */
int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size, "System RAM");
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, mhp_flags);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}

int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	int rc;

	lock_device_hotplug();
	rc = __add_memory(nid, start, size, mhp_flags);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(add_memory);

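/*
 * Example caller (a sketch, not part of this file): the ACPI memory
 * hotplug driver, already holding the device hotplug lock, uses
 *	__add_memory(node, info->start_addr, info->length, mhp_flags);
 * while callers that do not hold the lock use add_memory() above, which
 * takes it internally.
 */
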
/*
 * Add special, driver-managed memory to the system as system RAM. Such
 * memory is not exposed via the raw firmware-provided memmap as system
 * RAM, instead, it is detected and added by a driver - during cold boot,
 * after a reboot, and after kexec.
 *
 * Reasons why this memory should not be used for the initial memmap of a
 * kexec kernel or for placing kexec images:
 * - The booting kernel is in charge of determining how this memory will be
 *   used (e.g., use persistent memory as system RAM)
 * - Coordination with a hypervisor is required before this memory
 *   can be used (e.g., inaccessible parts).
 *
 * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
 * memory map") are created. Also, the created memory resource is flagged
 * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
 * this memory as well (esp., not place kexec images onto it).
 *
 * The resource_name (visible via /proc/iomem) has to have the format
 * "System RAM ($DRIVER)".
 */
int add_memory_driver_managed(int nid, u64 start, u64 size,
			      const char *resource_name, mhp_t mhp_flags)
{
	struct resource *res;
	int rc;

	if (!resource_name ||
	    strstr(resource_name, "System RAM (") != resource_name ||
	    resource_name[strlen(resource_name) - 1] != ')')
		return -EINVAL;

	lock_device_hotplug();

	res = register_memory_resource(start, size, resource_name);
	if (IS_ERR(res)) {
		rc = PTR_ERR(res);
		goto out_unlock;
	}

	rc = add_memory_resource(nid, res, mhp_flags);
	if (rc < 0)
		release_memory_resource(res);

out_unlock:
	unlock_device_hotplug();
	return rc;
}
EXPORT_SYMBOL_GPL(add_memory_driver_managed);

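/*
 * Example (a usage note, not from the original file): the dax/kmem driver
 * onlines persistent memory as system RAM via add_memory_driver_managed()
 * with a resource name of "System RAM (kmem)", so the range shows up under
 * that name in /proc/iomem and is not used for placing kexec images.
 */
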
bca3feaa AK |
1364 | /* |
1365 | * Platforms should define arch_get_mappable_range() that provides | |
1366 | * maximum possible addressable physical memory range for which the | |
1367 | * linear mapping could be created. The platform returned address | |
1368 | * range must adhere to these following semantics. | |
1369 | * | |
1370 | * - range.start <= range.end | |
1371 | * - Range includes both end points [range.start..range.end] | |
1372 | * | |
1373 | * There is also a fallback definition provided here, allowing the | |
1374 | * entire possible physical address range in case any platform does | |
1375 | * not define arch_get_mappable_range(). | |
1376 | */ | |
1377 | struct range __weak arch_get_mappable_range(void) | |
1378 | { | |
1379 | struct range mhp_range = { | |
1380 | .start = 0UL, | |
1381 | .end = -1ULL, | |
1382 | }; | |
1383 | return mhp_range; | |
1384 | } | |
1385 | ||
1386 | struct range mhp_get_pluggable_range(bool need_mapping) | |
1387 | { | |
1388 | const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; | |
1389 | struct range mhp_range; | |
1390 | ||
1391 | if (need_mapping) { | |
1392 | mhp_range = arch_get_mappable_range(); | |
1393 | if (mhp_range.start > max_phys) { | |
1394 | mhp_range.start = 0; | |
1395 | mhp_range.end = 0; | |
1396 | } | |
1397 | mhp_range.end = min_t(u64, mhp_range.end, max_phys); | |
1398 | } else { | |
1399 | mhp_range.start = 0; | |
1400 | mhp_range.end = max_phys; | |
1401 | } | |
1402 | return mhp_range; | |
1403 | } | |
1404 | EXPORT_SYMBOL_GPL(mhp_get_pluggable_range); | |
1405 | ||
1406 | bool mhp_range_allowed(u64 start, u64 size, bool need_mapping) | |
1407 | { | |
1408 | struct range mhp_range = mhp_get_pluggable_range(need_mapping); | |
1409 | u64 end = start + size; | |
1410 | ||
1411 | if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end) | |
1412 | return true; | |
1413 | ||
1414 | pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n", | |
1415 | start, end, mhp_range.start, mhp_range.end); | |
1416 | return false; | |
1417 | } | |
1418 | ||
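/*
 * Usage example (sketch): a hotplug driver such as virtio-mem can use
 * the exported helper to bound the physical ranges it may ever plug;
 * "start" and "size" stand in for the driver's candidate range:
 *
 *	const struct range pluggable = mhp_get_pluggable_range(true);
 *
 *	if (start < pluggable.start || start + size - 1 > pluggable.end)
 *		return -ERANGE;
 *
 * Passing "true" requests the range that the linear mapping can cover,
 * which is what ordinary system RAM requires.
 */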
0c0e6195 KH |
1419 | #ifdef CONFIG_MEMORY_HOTREMOVE |
1420 | /* | |
92917998 DH |
1421 | * Confirm all pages in a range [start, end) belong to the same zone (skipping |
1422 | * memory holes). If they do, return the zone; otherwise return NULL. |
0c0e6195 | 1423 | */ |
92917998 DH |
1424 | struct zone *test_pages_in_a_zone(unsigned long start_pfn, |
1425 | unsigned long end_pfn) | |
0c0e6195 | 1426 | { |
5f0f2887 | 1427 | unsigned long pfn, sec_end_pfn; |
0c0e6195 KH |
1428 | struct zone *zone = NULL; |
1429 | struct page *page; | |
1430 | int i; | |
deb88a2a | 1431 | for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); |
0c0e6195 | 1432 | pfn < end_pfn; |
deb88a2a | 1433 | pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { |
5f0f2887 AB |
1434 | /* Make sure the memory section is present first */ |
1435 | if (!present_section_nr(pfn_to_section_nr(pfn))) | |
0c0e6195 | 1436 | continue; |
5f0f2887 AB |
1437 | for (; pfn < sec_end_pfn && pfn < end_pfn; |
1438 | pfn += MAX_ORDER_NR_PAGES) { | |
1439 | i = 0; | |
1440 | /* This is just a CONFIG_HOLES_IN_ZONE check. */ |
1441 | while ((i < MAX_ORDER_NR_PAGES) && | |
1442 | !pfn_valid_within(pfn + i)) | |
1443 | i++; | |
d6d8c8a4 | 1444 | if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) |
5f0f2887 | 1445 | continue; |
24feb47c MZ |
1446 | /* Check if we got outside of the zone */ |
1447 | if (zone && !zone_spans_pfn(zone, pfn + i)) | |
92917998 | 1448 | return NULL; |
5f0f2887 AB |
1449 | page = pfn_to_page(pfn + i); |
1450 | if (zone && page_zone(page) != zone) | |
92917998 | 1451 | return NULL; |
5f0f2887 AB |
1452 | zone = page_zone(page); |
1453 | } | |
0c0e6195 | 1454 | } |
deb88a2a | 1455 | |
92917998 | 1456 | return zone; |
0c0e6195 KH |
1457 | } |
1458 | ||
1459 | /* | |
0efadf48 | 1460 | * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, |
aa218795 DH |
1461 | * non-lru movable pages and hugepages). Will skip over most unmovable |
1462 | * pages (esp., pages that can be skipped when offlining), but bail out on | |
1463 | * definitely unmovable pages. | |
1464 | * | |
1465 | * Returns: | |
1466 | * 0 in case a movable page is found and movable_pfn was updated. | |
1467 | * -ENOENT in case no movable page was found. | |
1468 | * -EBUSY in case a definitely unmovable page was found. | |
0c0e6195 | 1469 | */ |
aa218795 DH |
1470 | static int scan_movable_pages(unsigned long start, unsigned long end, |
1471 | unsigned long *movable_pfn) | |
0c0e6195 KH |
1472 | { |
1473 | unsigned long pfn; | |
eeb0efd0 | 1474 | |
0c0e6195 | 1475 | for (pfn = start; pfn < end; pfn++) { |
eeb0efd0 OS |
1476 | struct page *page, *head; |
1477 | unsigned long skip; | |
1478 | ||
1479 | if (!pfn_valid(pfn)) | |
1480 | continue; | |
1481 | page = pfn_to_page(pfn); | |
1482 | if (PageLRU(page)) | |
aa218795 | 1483 | goto found; |
eeb0efd0 | 1484 | if (__PageMovable(page)) |
aa218795 DH |
1485 | goto found; |
1486 | ||
1487 | /* | |
1488 | * PageOffline() pages that are not marked __PageMovable() and | |
1489 | * have a reference count > 0 (after MEM_GOING_OFFLINE) are | |
1490 | * definitely unmovable. If their reference count would be 0, | |
1491 | * they could at least be skipped when offlining memory. | |
1492 | */ | |
1493 | if (PageOffline(page) && page_count(page)) | |
1494 | return -EBUSY; | |
eeb0efd0 OS |
1495 | |
1496 | if (!PageHuge(page)) | |
1497 | continue; | |
1498 | head = compound_head(page); | |
8f251a3d MK |
1499 | /* |
1500 | * This test is racy as we hold no reference or lock. The | |
1501 | * hugetlb page could have been freed and head is no longer |
1502 | * a hugetlb page before the following check. In such unlikely | |
1503 | * cases false positives and negatives are possible. Calling | |
1504 | * code must deal with these scenarios. | |
1505 | */ | |
1506 | if (HPageMigratable(head)) | |
aa218795 | 1507 | goto found; |
d8c6546b | 1508 | skip = compound_nr(head) - (page - head); |
eeb0efd0 | 1509 | pfn += skip - 1; |
0c0e6195 | 1510 | } |
aa218795 DH |
1511 | return -ENOENT; |
1512 | found: | |
1513 | *movable_pfn = pfn; | |
0c0e6195 KH |
1514 | return 0; |
1515 | } | |
1516 | ||
0c0e6195 KH |
1517 | static int |
1518 | do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |
1519 | { | |
1520 | unsigned long pfn; | |
6c357848 | 1521 | struct page *page, *head; |
0c0e6195 KH |
1522 | int ret = 0; |
1523 | LIST_HEAD(source); | |
1524 | ||
a85009c3 | 1525 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
0c0e6195 KH |
1526 | if (!pfn_valid(pfn)) |
1527 | continue; | |
1528 | page = pfn_to_page(pfn); | |
6c357848 | 1529 | head = compound_head(page); |
c8721bbb NH |
1530 | |
1531 | if (PageHuge(page)) { | |
d8c6546b | 1532 | pfn = page_to_pfn(head) + compound_nr(head) - 1; |
daf3538a | 1533 | isolate_huge_page(head, &source); |
c8721bbb | 1534 | continue; |
94723aaf | 1535 | } else if (PageTransHuge(page)) |
6c357848 | 1536 | pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; |
c8721bbb | 1537 | |
b15c8726 MH |
1538 | /* |
1539 | * HWPoison pages have elevated reference counts so the migration would | |
1540 | * fail on them. It also doesn't make any sense to migrate them in the | |
1541 | * first place. Still try to unmap such a page in case it is still mapped | |
1542 | * (e.g. the current hwpoison implementation doesn't unmap KSM pages; keep |
1543 | * the unmap as the catch-all safety net). |
1544 | */ | |
1545 | if (PageHWPoison(page)) { | |
1546 | if (WARN_ON(PageLRU(page))) | |
1547 | isolate_lru_page(page); | |
1548 | if (page_mapped(page)) | |
013339df | 1549 | try_to_unmap(page, TTU_IGNORE_MLOCK); |
b15c8726 MH |
1550 | continue; |
1551 | } | |
1552 | ||
700c2a46 | 1553 | if (!get_page_unless_zero(page)) |
0c0e6195 KH |
1554 | continue; |
1555 | /* | |
0efadf48 YX |
1556 | * We can skip free pages. And we can deal with LRU pages and |
1557 | * non-lru movable pages. |
0c0e6195 | 1558 | */ |
0efadf48 YX |
1559 | if (PageLRU(page)) |
1560 | ret = isolate_lru_page(page); | |
1561 | else | |
1562 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | |
0c0e6195 | 1563 | if (!ret) { /* Success */ |
62695a84 | 1564 | list_add_tail(&page->lru, &source); |
0efadf48 YX |
1565 | if (!__PageMovable(page)) |
1566 | inc_node_page_state(page, NR_ISOLATED_ANON + | |
9de4f22a | 1567 | page_is_file_lru(page)); |
6d9c285a | 1568 | |
0c0e6195 | 1569 | } else { |
2932c8b0 | 1570 | pr_warn("failed to isolate pfn %lx\n", pfn); |
0efadf48 | 1571 | dump_page(page, "isolation failed"); |
0c0e6195 | 1572 | } |
1723058e | 1573 | put_page(page); |
0c0e6195 | 1574 | } |
f3ab2636 | 1575 | if (!list_empty(&source)) { |
203e6e5c JK |
1576 | nodemask_t nmask = node_states[N_MEMORY]; |
1577 | struct migration_target_control mtc = { | |
1578 | .nmask = &nmask, | |
1579 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, | |
1580 | }; | |
1581 | ||
1582 | /* | |
1583 | * We have checked that the migration range is within a single zone, |
1584 | * so we can use the nid of the first page for all the others. |
1585 | */ | |
1586 | mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); | |
1587 | ||
1588 | /* | |
1589 | * try to allocate from a different node but reuse this node | |
1590 | * if there are no other online nodes to be used (e.g. we are | |
1591 | * offlining a part of the only existing node) | |
1592 | */ | |
1593 | node_clear(mtc.nid, nmask); | |
1594 | if (nodes_empty(nmask)) | |
1595 | node_set(mtc.nid, nmask); | |
1596 | ret = migrate_pages(&source, alloc_migration_target, NULL, | |
1597 | (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); | |
2932c8b0 MH |
1598 | if (ret) { |
1599 | list_for_each_entry(page, &source, lru) { | |
1600 | pr_warn("migrating pfn %lx failed ret:%d ", | |
1601 | page_to_pfn(page), ret); | |
1602 | dump_page(page, "migration failure"); | |
1603 | } | |
c8721bbb | 1604 | putback_movable_pages(&source); |
2932c8b0 | 1605 | } |
0c0e6195 | 1606 | } |
1723058e | 1607 | |
0c0e6195 KH |
1608 | return ret; |
1609 | } | |
1610 | ||
c5320926 TC |
1611 | static int __init cmdline_parse_movable_node(char *p) |
1612 | { | |
55ac590c | 1613 | movable_node_enabled = true; |
c5320926 TC |
1614 | return 0; |
1615 | } | |
1616 | early_param("movable_node", cmdline_parse_movable_node); | |
1617 | ||
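/*
 * Usage example: booting with "movable_node" on the kernel command line
 * (documented in kernel-parameters.txt) keeps firmware-reported
 * hotpluggable memory usable only for movable allocations, so that it
 * can later be offlined and unplugged reliably.
 */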
d9713679 LJ |
1618 | /* check which states in node_states will be changed when offlining memory */ |
1619 | static void node_states_check_changes_offline(unsigned long nr_pages, | |
1620 | struct zone *zone, struct memory_notify *arg) | |
1621 | { | |
1622 | struct pglist_data *pgdat = zone->zone_pgdat; | |
1623 | unsigned long present_pages = 0; | |
86b27bea | 1624 | enum zone_type zt; |
d9713679 | 1625 | |
98fa15f3 AK |
1626 | arg->status_change_nid = NUMA_NO_NODE; |
1627 | arg->status_change_nid_normal = NUMA_NO_NODE; | |
1628 | arg->status_change_nid_high = NUMA_NO_NODE; | |
d9713679 LJ |
1629 | |
1630 | /* | |
86b27bea OS |
1631 | * Check whether node_states[N_NORMAL_MEMORY] will be changed. |
1632 | * If the memory to be offlined is within the range |
1633 | * [0..ZONE_NORMAL], and it is the last present memory there, | |
1634 | * the zones in that range will become empty after the offlining, | |
1635 | * thus we can determine that we need to clear the node from | |
1636 | * node_states[N_NORMAL_MEMORY]. | |
d9713679 | 1637 | */ |
86b27bea | 1638 | for (zt = 0; zt <= ZONE_NORMAL; zt++) |
d9713679 | 1639 | present_pages += pgdat->node_zones[zt].present_pages; |
86b27bea | 1640 | if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) |
d9713679 | 1641 | arg->status_change_nid_normal = zone_to_nid(zone); |
d9713679 | 1642 | |
6715ddf9 LJ |
1643 | #ifdef CONFIG_HIGHMEM |
1644 | /* | |
86b27bea OS |
1645 | * node_states[N_HIGH_MEMORY] contains nodes which |
1646 | * have normal memory or high memory. | |
1647 | * Here we add the present_pages belonging to ZONE_HIGHMEM. | |
1648 | * If the zone is within the range [0..ZONE_HIGHMEM], and |
1649 | * we determine that the zones in that range become empty, | |
1650 | * we need to clear the node for N_HIGH_MEMORY. | |
6715ddf9 | 1651 | */ |
86b27bea OS |
1652 | present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages; |
1653 | if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages) | |
6715ddf9 | 1654 | arg->status_change_nid_high = zone_to_nid(zone); |
6715ddf9 LJ |
1655 | #endif |
1656 | ||
d9713679 | 1657 | /* |
86b27bea OS |
1658 | * We have accounted the pages from [0..ZONE_NORMAL), and |
1659 | * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM | |
1660 | * as well. | |
1661 | * Here we count the possible pages from ZONE_MOVABLE. | |
1662 | * If, after having accounted for all the pages, we see that the nr_pages |
1663 | * to be offlined is greater than or equal to the accounted pages, |
1664 | * we know that the node will become empty, and so, we can clear | |
1665 | * it for N_MEMORY as well. | |
d9713679 | 1666 | */ |
86b27bea | 1667 | present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages; |
d9713679 | 1668 | |
d9713679 LJ |
1669 | if (nr_pages >= present_pages) |
1670 | arg->status_change_nid = zone_to_nid(zone); | |
d9713679 LJ |
1671 | } |
1672 | ||
1673 | static void node_states_clear_node(int node, struct memory_notify *arg) | |
1674 | { | |
1675 | if (arg->status_change_nid_normal >= 0) | |
1676 | node_clear_state(node, N_NORMAL_MEMORY); | |
1677 | ||
cf01f6f5 | 1678 | if (arg->status_change_nid_high >= 0) |
d9713679 | 1679 | node_clear_state(node, N_HIGH_MEMORY); |
6715ddf9 | 1680 | |
cf01f6f5 | 1681 | if (arg->status_change_nid >= 0) |
6715ddf9 | 1682 | node_clear_state(node, N_MEMORY); |
d9713679 LJ |
1683 | } |
1684 | ||
c5e79ef5 DH |
1685 | static int count_system_ram_pages_cb(unsigned long start_pfn, |
1686 | unsigned long nr_pages, void *data) | |
1687 | { | |
1688 | unsigned long *nr_system_ram_pages = data; | |
1689 | ||
1690 | *nr_system_ram_pages += nr_pages; | |
1691 | return 0; | |
1692 | } | |
1693 | ||
73a11c96 | 1694 | int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages) |
0c0e6195 | 1695 | { |
73a11c96 | 1696 | const unsigned long end_pfn = start_pfn + nr_pages; |
0a1a9a00 | 1697 | unsigned long pfn, system_ram_pages = 0; |
d702909f | 1698 | unsigned long flags; |
0c0e6195 | 1699 | struct zone *zone; |
7b78d335 | 1700 | struct memory_notify arg; |
ea15153c | 1701 | int ret, node; |
79605093 | 1702 | char *reason; |
0c0e6195 | 1703 | |
dd8e2f23 OS |
1704 | /* |
1705 | * {on,off}lining is constrained to full memory sections (or more | |
1706 | * precisly to memory blocks from the user space POV). | |
1707 | * memmap_on_memory is an exception because it reserves initial part | |
1708 | * of the physical memory space for vmemmaps. That space is pageblock | |
1709 | * aligned. | |
1710 | */ | |
4986fac1 | 1711 | if (WARN_ON_ONCE(!nr_pages || |
dd8e2f23 OS |
1712 | !IS_ALIGNED(start_pfn, pageblock_nr_pages) || |
1713 | !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) | |
4986fac1 DH |
1714 | return -EINVAL; |
1715 | ||
381eab4a DH |
1716 | mem_hotplug_begin(); |
1717 | ||
c5e79ef5 DH |
1718 | /* |
1719 | * Don't allow offlining memory blocks that contain holes. |
1720 | * Consequently, memory blocks with holes can never get onlined | |
1721 | * via the hotplug path - online_pages() - as hotplugged memory has | |
1722 | * no holes. This way, we e.g., don't have to worry about marking | |
1723 | * memory holes PG_reserved, don't need pfn_valid() checks, and can | |
1724 | * avoid using walk_system_ram_range() later. | |
1725 | */ | |
73a11c96 | 1726 | walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages, |
c5e79ef5 | 1727 | count_system_ram_pages_cb); |
73a11c96 | 1728 | if (system_ram_pages != nr_pages) { |
c5e79ef5 DH |
1729 | ret = -EINVAL; |
1730 | reason = "memory holes"; | |
1731 | goto failed_removal; | |
1732 | } | |
1733 | ||
0c0e6195 KH |
1734 | /* We assume, for now, that the range lies within a single zone; this |
1735 | makes hotplug much easier...and the code more readable. */ |
92917998 DH |
1736 | zone = test_pages_in_a_zone(start_pfn, end_pfn); |
1737 | if (!zone) { | |
79605093 MH |
1738 | ret = -EINVAL; |
1739 | reason = "multizone range"; | |
1740 | goto failed_removal; | |
381eab4a | 1741 | } |
7b78d335 | 1742 | node = zone_to_nid(zone); |
7b78d335 | 1743 | |
ec6e8c7e VB |
1744 | /* |
1745 | * Disable pcplists so that page isolation cannot race with freeing | |
1746 | * in a way that pages from the isolated pageblock are left on pcplists. |
1747 | */ | |
1748 | zone_pcp_disable(zone); | |
d479960e | 1749 | lru_cache_disable(); |
ec6e8c7e | 1750 | |
0c0e6195 | 1751 | /* set above range as isolated */ |
b023f468 | 1752 | ret = start_isolate_page_range(start_pfn, end_pfn, |
d381c547 | 1753 | MIGRATE_MOVABLE, |
756d25be | 1754 | MEMORY_OFFLINE | REPORT_FAILURE); |
3fa0c7c7 | 1755 | if (ret) { |
79605093 | 1756 | reason = "failure to isolate range"; |
ec6e8c7e | 1757 | goto failed_removal_pcplists_disabled; |
381eab4a | 1758 | } |
7b78d335 YG |
1759 | |
1760 | arg.start_pfn = start_pfn; | |
1761 | arg.nr_pages = nr_pages; | |
d9713679 | 1762 | node_states_check_changes_offline(nr_pages, zone, &arg); |
7b78d335 YG |
1763 | |
1764 | ret = memory_notify(MEM_GOING_OFFLINE, &arg); | |
1765 | ret = notifier_to_errno(ret); | |
79605093 MH |
1766 | if (ret) { |
1767 | reason = "notifier failure"; | |
1768 | goto failed_removal_isolated; | |
1769 | } | |
7b78d335 | 1770 | |
bb8965bd | 1771 | do { |
aa218795 DH |
1772 | pfn = start_pfn; |
1773 | do { | |
bb8965bd MH |
1774 | if (signal_pending(current)) { |
1775 | ret = -EINTR; | |
1776 | reason = "signal backoff"; | |
1777 | goto failed_removal_isolated; | |
1778 | } | |
72b39cfc | 1779 | |
bb8965bd | 1780 | cond_resched(); |
bb8965bd | 1781 | |
aa218795 DH |
1782 | ret = scan_movable_pages(pfn, end_pfn, &pfn); |
1783 | if (!ret) { | |
bb8965bd MH |
1784 | /* |
1785 | * TODO: fatal migration failures should bail | |
1786 | * out | |
1787 | */ | |
1788 | do_migrate_range(pfn, end_pfn); | |
1789 | } | |
aa218795 DH |
1790 | } while (!ret); |
1791 | ||
1792 | if (ret != -ENOENT) { | |
1793 | reason = "unmovable page"; | |
1794 | goto failed_removal_isolated; | |
bb8965bd | 1795 | } |
0c0e6195 | 1796 | |
bb8965bd MH |
1797 | /* |
1798 | * Dissolve free hugepages in the memory block before actually |
1799 | * offlining, in order to make hugetlbfs's object |
1800 | * counting consistent. | |
1801 | */ | |
1802 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); | |
1803 | if (ret) { | |
1804 | reason = "failure to dissolve huge pages"; | |
1805 | goto failed_removal_isolated; | |
1806 | } | |
0a1a9a00 | 1807 | |
0a1a9a00 | 1808 | ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); |
ec6e8c7e | 1809 | |
5557c766 | 1810 | } while (ret); |
72b39cfc | 1811 | |
0a1a9a00 DH |
1812 | /* Mark all sections offline and remove free pages from the buddy. */ |
1813 | __offline_isolated_pages(start_pfn, end_pfn); | |
7c33023a | 1814 | pr_debug("Offlined Pages %lu\n", nr_pages); |
0a1a9a00 | 1815 | |
9b7ea46a | 1816 | /* |
b30c5927 DH |
1817 | * The memory sections are marked offline, and the pageblock flags |
1818 | * effectively stale; nobody should be touching them. Fixup the number | |
1819 | * of isolated pageblocks, memory onlining will properly revert this. | |
9b7ea46a QC |
1820 | */ |
1821 | spin_lock_irqsave(&zone->lock, flags); | |
ea15153c | 1822 | zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; |
9b7ea46a QC |
1823 | spin_unlock_irqrestore(&zone->lock, flags); |
1824 | ||
d479960e | 1825 | lru_cache_enable(); |
ec6e8c7e VB |
1826 | zone_pcp_enable(zone); |
1827 | ||
0c0e6195 | 1828 | /* removal success */ |
0a1a9a00 | 1829 | adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); |
f9901144 | 1830 | adjust_present_page_count(zone, -nr_pages); |
7b78d335 | 1831 | |
1b79acc9 KM |
1832 | init_per_zone_wmark_min(); |
1833 | ||
1e8537ba | 1834 | if (!populated_zone(zone)) { |
340175b7 | 1835 | zone_pcp_reset(zone); |
72675e13 | 1836 | build_all_zonelists(NULL); |
1e8537ba XQ |
1837 | } else |
1838 | zone_pcp_update(zone); | |
340175b7 | 1839 | |
d9713679 | 1840 | node_states_clear_node(node, &arg); |
698b1b30 | 1841 | if (arg.status_change_nid >= 0) { |
8fe23e05 | 1842 | kswapd_stop(node); |
698b1b30 VB |
1843 | kcompactd_stop(node); |
1844 | } | |
bce7394a | 1845 | |
0c0e6195 | 1846 | writeback_set_ratelimit(); |
7b78d335 YG |
1847 | |
1848 | memory_notify(MEM_OFFLINE, &arg); | |
feee6b29 | 1849 | remove_pfn_range_from_zone(zone, start_pfn, nr_pages); |
381eab4a | 1850 | mem_hotplug_done(); |
0c0e6195 KH |
1851 | return 0; |
1852 | ||
79605093 MH |
1853 | failed_removal_isolated: |
1854 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | |
c4efe484 | 1855 | memory_notify(MEM_CANCEL_OFFLINE, &arg); |
ec6e8c7e VB |
1856 | failed_removal_pcplists_disabled: |
1857 | zone_pcp_enable(zone); | |
0c0e6195 | 1858 | failed_removal: |
79605093 | 1859 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", |
e33e33b4 | 1860 | (unsigned long long) start_pfn << PAGE_SHIFT, |
79605093 MH |
1861 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
1862 | reason); | |
0c0e6195 | 1863 | /* pushback to free area */ |
381eab4a | 1864 | mem_hotplug_done(); |
0c0e6195 KH |
1865 | return ret; |
1866 | } | |
71088785 | 1867 | |
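/*
 * Usage example: from user space, offlining is typically driven one
 * memory block at a time via sysfs (block number 32 is just an
 * illustration):
 *
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *
 * That write reaches offline_pages() through the memory block device's
 * "state" attribute.
 */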
d6de9d53 | 1868 | static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) |
bbc76be6 WC |
1869 | { |
1870 | int ret = !is_memblock_offlined(mem); | |
1871 | ||
349daa0f RD |
1872 | if (unlikely(ret)) { |
1873 | phys_addr_t beginpa, endpa; | |
1874 | ||
1875 | beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); | |
b6c88d3b | 1876 | endpa = beginpa + memory_block_size_bytes() - 1; |
756a025f | 1877 | pr_warn("removing memory fails, because memory [%pa-%pa] is still online\n", |
349daa0f | 1878 | &beginpa, &endpa); |
bbc76be6 | 1879 | |
eca499ab PT |
1880 | return -EBUSY; |
1881 | } | |
1882 | return 0; | |
bbc76be6 WC |
1883 | } |
1884 | ||
a08a2ae3 OS |
1885 | static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg) |
1886 | { | |
1887 | /* | |
1888 | * Returning 0 (no vmemmap pages on this block) continues the walk with the next block. |
1889 | */ | |
1890 | return mem->nr_vmemmap_pages; | |
1891 | } | |
1892 | ||
0f1cfe9d | 1893 | static int check_cpu_on_node(pg_data_t *pgdat) |
60a5a19e | 1894 | { |
60a5a19e TC |
1895 | int cpu; |
1896 | ||
1897 | for_each_present_cpu(cpu) { | |
1898 | if (cpu_to_node(cpu) == pgdat->node_id) | |
1899 | /* | |
1900 | * A CPU on this node hasn't been removed, so we can't |
1901 | * offline this node. | |
1902 | */ | |
1903 | return -EBUSY; | |
1904 | } | |
1905 | ||
1906 | return 0; | |
1907 | } | |
1908 | ||
2c91f8fc DH |
1909 | static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg) |
1910 | { | |
1911 | int nid = *(int *)arg; | |
1912 | ||
1913 | /* | |
1914 | * If a memory block belongs to multiple nodes, the stored nid is not | |
1915 | * reliable. However, such blocks are always online (e.g., cannot get | |
1916 | * offlined) and, therefore, are still spanned by the node. | |
1917 | */ | |
1918 | return mem->nid == nid ? -EEXIST : 0; | |
1919 | } | |
1920 | ||
0f1cfe9d TK |
1921 | /** |
1922 | * try_offline_node | |
e8b098fc | 1923 | * @nid: the node ID |
0f1cfe9d TK |
1924 | * |
1925 | * Offline a node if all memory sections and cpus of the node are removed. | |
1926 | * | |
1927 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
1928 | * and online/offline operations before this call. | |
1929 | */ | |
90b30cdc | 1930 | void try_offline_node(int nid) |
60a5a19e | 1931 | { |
d822b86a | 1932 | pg_data_t *pgdat = NODE_DATA(nid); |
2c91f8fc | 1933 | int rc; |
60a5a19e | 1934 | |
2c91f8fc DH |
1935 | /* |
1936 | * If the node still spans pages (especially ZONE_DEVICE), don't | |
1937 | * offline it. A node spans memory after move_pfn_range_to_zone(), | |
1938 | * e.g., after the memory block was onlined. | |
1939 | */ | |
1940 | if (pgdat->node_spanned_pages) | |
1941 | return; | |
60a5a19e | 1942 | |
2c91f8fc DH |
1943 | /* |
1944 | * Especially offline memory blocks might not be spanned by the | |
1945 | * node. They will get spanned by the node once they get onlined. | |
1946 | * However, they link to the node in sysfs and can get onlined later. | |
1947 | */ | |
1948 | rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb); | |
1949 | if (rc) | |
60a5a19e | 1950 | return; |
60a5a19e | 1951 | |
46a3679b | 1952 | if (check_cpu_on_node(pgdat)) |
60a5a19e TC |
1953 | return; |
1954 | ||
1955 | /* | |
1956 | * All memory and CPUs of this node have been removed; we can offline |
1957 | * this node now. |
1958 | */ | |
1959 | node_set_offline(nid); | |
1960 | unregister_one_node(nid); | |
1961 | } | |
90b30cdc | 1962 | EXPORT_SYMBOL(try_offline_node); |
60a5a19e | 1963 | |
eca499ab | 1964 | static int __ref try_remove_memory(int nid, u64 start, u64 size) |
bbc76be6 | 1965 | { |
eca499ab | 1966 | int rc = 0; |
a08a2ae3 OS |
1967 | struct vmem_altmap mhp_altmap = {}; |
1968 | struct vmem_altmap *altmap = NULL; | |
1969 | unsigned long nr_vmemmap_pages; | |
993c1aad | 1970 | |
27356f54 TK |
1971 | BUG_ON(check_hotplug_memory_range(start, size)); |
1972 | ||
6677e3ea | 1973 | /* |
242831eb | 1974 | * All memory blocks must be offlined before removing memory. Check |
eca499ab | 1975 | * whether all memory blocks in question are offline and return an error |
242831eb | 1976 | * if this is not the case. |
6677e3ea | 1977 | */ |
fbcf73ce | 1978 | rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb); |
eca499ab | 1979 | if (rc) |
b4223a51 | 1980 | return rc; |
6677e3ea | 1981 | |
a08a2ae3 OS |
1982 | /* |
1983 | * We only support removing memory added with MHP_MEMMAP_ON_MEMORY at |
1984 | * the same granularity it was added - a single memory block. | |
1985 | */ | |
1986 | if (memmap_on_memory) { | |
1987 | nr_vmemmap_pages = walk_memory_blocks(start, size, NULL, | |
1988 | get_nr_vmemmap_pages_cb); | |
1989 | if (nr_vmemmap_pages) { | |
1990 | if (size != memory_block_size_bytes()) { | |
1991 | pr_warn("Refuse to remove %#llx - %#llx, " | |
1992 | "wrong granularity\n", | |
1993 | start, start + size); | |
1994 | return -EINVAL; | |
1995 | } | |
1996 | ||
1997 | /* | |
1998 | * Let remove_pmd_table->free_hugepage_table do the | |
1999 | * right thing if we used vmem_altmap when hot-adding | |
2000 | * the range. | |
2001 | */ | |
2002 | mhp_altmap.alloc = nr_vmemmap_pages; | |
2003 | altmap = &mhp_altmap; | |
2004 | } | |
2005 | } | |
2006 | ||
46c66c4b YI |
2007 | /* remove memmap entry */ |
2008 | firmware_map_remove(start, start + size, "System RAM"); | |
4c4b7f9b | 2009 | |
f1037ec0 DW |
2010 | /* |
2011 | * Memory block device removal under the device_hotplug_lock is | |
2012 | * a barrier against racing online attempts. | |
2013 | */ | |
4c4b7f9b | 2014 | remove_memory_block_devices(start, size); |
46c66c4b | 2015 | |
f1037ec0 DW |
2016 | mem_hotplug_begin(); |
2017 | ||
a08a2ae3 | 2018 | arch_remove_memory(nid, start, size, altmap); |
52219aea DH |
2019 | |
2020 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { | |
2021 | memblock_free(start, size); | |
2022 | memblock_remove(start, size); | |
2023 | } | |
2024 | ||
cb8e3c8b | 2025 | release_mem_region_adjustable(start, size); |
24d335ca | 2026 | |
60a5a19e TC |
2027 | try_offline_node(nid); |
2028 | ||
bfc8c901 | 2029 | mem_hotplug_done(); |
b4223a51 | 2030 | return 0; |
71088785 | 2031 | } |
d15e5926 | 2032 | |
eca499ab PT |
2033 | /** |
2034 | * remove_memory | |
2035 | * @nid: the node ID | |
2036 | * @start: physical address of the region to remove | |
2037 | * @size: size of the region to remove | |
2038 | * | |
2039 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
2040 | * and online/offline operations before this call, as required by | |
2041 | * try_offline_node(). | |
2042 | */ | |
2043 | void __remove_memory(int nid, u64 start, u64 size) | |
2044 | { | |
2045 | ||
2046 | /* | |
29a90db9 | 2047 | * trigger BUG() if some memory is not offlined prior to calling this |
eca499ab PT |
2048 | * function |
2049 | */ | |
2050 | if (try_remove_memory(nid, start, size)) | |
2051 | BUG(); | |
2052 | } | |
2053 | ||
2054 | /* | |
2055 | * Remove memory if every memory block is offline, otherwise return -EBUSY if |
2056 | * some memory is not offline. |
2057 | */ | |
2058 | int remove_memory(int nid, u64 start, u64 size) | |
d15e5926 | 2059 | { |
eca499ab PT |
2060 | int rc; |
2061 | ||
d15e5926 | 2062 | lock_device_hotplug(); |
eca499ab | 2063 | rc = try_remove_memory(nid, start, size); |
d15e5926 | 2064 | unlock_device_hotplug(); |
eca499ab PT |
2065 | |
2066 | return rc; | |
d15e5926 | 2067 | } |
71088785 | 2068 | EXPORT_SYMBOL_GPL(remove_memory); |
08b3acd7 | 2069 | |
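/*
 * Usage example (sketch, modeled on dax/kmem teardown; "range" stands
 * in for the driver's own bookkeeping): once all affected memory
 * blocks have been offlined, the memory can be removed:
 *
 *	rc = remove_memory(nid, range.start, range_len(&range));
 *	if (rc == -EBUSY)
 *		return rc;
 *
 * -EBUSY indicates that at least one memory block was still online.
 */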
8dc4bb58 DH |
2070 | static int try_offline_memory_block(struct memory_block *mem, void *arg) |
2071 | { | |
2072 | uint8_t online_type = MMOP_ONLINE_KERNEL; | |
2073 | uint8_t **online_types = arg; | |
2074 | struct page *page; | |
2075 | int rc; | |
2076 | ||
2077 | /* | |
2078 | * Sense the online_type via the zone of the memory block. Offlining | |
2079 | * a memory block that spans multiple zones will be rejected |
2080 | * by the offlining code ... so we don't care about that case. |
2081 | */ | |
2082 | page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr)); | |
2083 | if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE) | |
2084 | online_type = MMOP_ONLINE_MOVABLE; | |
2085 | ||
2086 | rc = device_offline(&mem->dev); | |
2087 | /* | |
2088 | * Default is MMOP_OFFLINE - change it only if offlining succeeded, | |
2089 | * so try_reonline_memory_block() can do the right thing. | |
2090 | */ | |
2091 | if (!rc) | |
2092 | **online_types = online_type; | |
2093 | ||
2094 | (*online_types)++; | |
2095 | /* Ignore if already offline. */ | |
2096 | return rc < 0 ? rc : 0; | |
2097 | } | |
2098 | ||
2099 | static int try_reonline_memory_block(struct memory_block *mem, void *arg) | |
2100 | { | |
2101 | uint8_t **online_types = arg; | |
2102 | int rc; | |
2103 | ||
2104 | if (**online_types != MMOP_OFFLINE) { | |
2105 | mem->online_type = **online_types; | |
2106 | rc = device_online(&mem->dev); | |
2107 | if (rc < 0) | |
2108 | pr_warn("%s: Failed to re-online memory: %d\n", |
2109 | __func__, rc); | |
2110 | } | |
2111 | ||
2112 | /* Continue processing all remaining memory blocks. */ | |
2113 | (*online_types)++; | |
2114 | return 0; | |
2115 | } | |
2116 | ||
08b3acd7 | 2117 | /* |
8dc4bb58 DH |
2118 | * Try to offline and remove memory. Might take a long time to finish in case |
2119 | * memory is still in use. Primarily useful for memory devices that logically | |
2120 | * unplugged all memory (so it's no longer in use) and want to offline + remove | |
2121 | * that memory. | |
08b3acd7 DH |
2122 | */ |
2123 | int offline_and_remove_memory(int nid, u64 start, u64 size) | |
2124 | { | |
8dc4bb58 DH |
2125 | const unsigned long mb_count = size / memory_block_size_bytes(); |
2126 | uint8_t *online_types, *tmp; | |
2127 | int rc; | |
08b3acd7 DH |
2128 | |
2129 | if (!IS_ALIGNED(start, memory_block_size_bytes()) || | |
8dc4bb58 DH |
2130 | !IS_ALIGNED(size, memory_block_size_bytes()) || !size) |
2131 | return -EINVAL; | |
2132 | ||
2133 | /* | |
2134 | * We'll remember the old online type of each memory block, so we can | |
2135 | * try to revert whatever we did when offlining one memory block fails | |
2136 | * after offlining some others succeeded. | |
2137 | */ | |
2138 | online_types = kmalloc_array(mb_count, sizeof(*online_types), | |
2139 | GFP_KERNEL); | |
2140 | if (!online_types) | |
2141 | return -ENOMEM; | |
2142 | /* | |
2143 | * Initialize all states to MMOP_OFFLINE, so when we abort processing in | |
2144 | * try_offline_memory_block(), we'll skip all unprocessed blocks in | |
2145 | * try_reonline_memory_block(). | |
2146 | */ | |
2147 | memset(online_types, MMOP_OFFLINE, mb_count); | |
08b3acd7 DH |
2148 | |
2149 | lock_device_hotplug(); | |
8dc4bb58 DH |
2150 | |
2151 | tmp = online_types; | |
2152 | rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block); | |
08b3acd7 DH |
2153 | |
2154 | /* | |
8dc4bb58 | 2155 | * In case we succeeded to offline all memory, remove it. |
08b3acd7 DH |
2156 | * This cannot fail as it cannot get onlined in the meantime. |
2157 | */ | |
2158 | if (!rc) { | |
2159 | rc = try_remove_memory(nid, start, size); | |
8dc4bb58 DH |
2160 | if (rc) |
2161 | pr_err("%s: Failed to remove memory: %d\n", __func__, rc); |
2162 | } | |
2163 | ||
2164 | /* | |
2165 | * Rollback what we did. While memory onlining might theoretically fail | |
2166 | * (nacked by a notifier), it barely ever happens. | |
2167 | */ | |
2168 | if (rc) { | |
2169 | tmp = online_types; | |
2170 | walk_memory_blocks(start, size, &tmp, | |
2171 | try_reonline_memory_block); | |
08b3acd7 DH |
2172 | } |
2173 | unlock_device_hotplug(); | |
2174 | ||
8dc4bb58 | 2175 | kfree(online_types); |
08b3acd7 DH |
2176 | return rc; |
2177 | } | |
2178 | EXPORT_SYMBOL_GPL(offline_and_remove_memory); | |
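/*
 * Usage example (sketch, modeled on virtio-mem): after logically
 * unplugging all pages of a memory block, a driver can trigger
 * offlining and removal in one step:
 *
 *	rc = offline_and_remove_memory(nid, addr,
 *				       memory_block_size_bytes());
 *
 * A failure (e.g., -EBUSY) merely means some pages are still in use;
 * the caller is free to retry later.
 */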
aba6efc4 | 2179 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |