/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

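/*
 * Illustrative example: a balloon-style driver could divert hot-added
 * pages to itself instead of the buddy allocator by registering its own
 * callback. my_online_page() below is a hypothetical helper, not a real
 * kernel symbol:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		-- keep the page for the driver instead of __online_page_free() --
 *	}
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */
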
DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
        mutex_lock(&mem_hotplug_mutex);

        /* for exclusive hibernation if CONFIG_HIBERNATION=y */
        lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
        unlock_system_sleep();
        mutex_unlock(&mem_hotplug_mutex);
}


/* add this memory to the iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                printk("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
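/*
 * The helpers below track which boot-time allocations (section memmaps,
 * pageblock usemaps, the pgdat and zone wait tables) back a hot-pluggable
 * range: get_page_bootmem() stashes a type in page->lru.next and an info
 * value in page_private() while taking a reference, and put_page_bootmem()
 * undoes this and returns the page to the buddy allocator via
 * __free_pages_bootmem() once the last reference is dropped.
 */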
static void get_page_bootmem(unsigned long info, struct page *page,
                             unsigned long type)
{
        page->lru.next = (struct list_head *) type;
        SetPagePrivate(page);
        set_page_private(page, info);
        atomic_inc(&page->_count);
}

/* reference to __meminit __free_pages_bootmem is valid
 * so use __ref to tell modpost not to generate a warning */
void __ref put_page_bootmem(struct page *page)
{
        unsigned long type;
        static DEFINE_MUTEX(ppb_lock);

        type = (unsigned long) page->lru.next;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (atomic_dec_return(&page->_count) == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);

                /*
                 * Please refer to the comment for __free_pages_bootmem()
                 * for why we serialize here.
                 */
                mutex_lock(&ppb_lock);
                __free_pages_bootmem(page, 0);
                mutex_unlock(&ppb_lock);
        }

}

static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;
        struct zone *zone;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        zone = &pgdat->node_zones[0];
        for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
                if (zone->wait_table) {
                        nr_pages = zone->wait_table_hash_nr_entries
                                * sizeof(wait_queue_head_t);
                        nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
                        page = virt_to_page(zone->wait_table);

                        for (i = 0; i < nr_pages; i++, page++)
                                get_page_bootmem(node, page, NODE_INFO);
                }
        }

        pfn = pgdat->node_start_pfn;
        end_pfn = pfn + pgdat->node_spanned_pages;

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes - on
                 * node0 as well as nodeN. To avoid registering a pfn against
                 * multiple nodes we check that this pfn does not already
                 * reside in some other node.
                 */
                if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
                           unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        zone_span_writelock(zone);

        if (end_pfn - start_pfn) {
                zone->zone_start_pfn = start_pfn;
                zone->spanned_pages = end_pfn - start_pfn;
        } else {
                /*
                 * Keep this consistent with free_area_init_core():
                 * if spanned_pages == 0, then keep start_pfn == 0.
                 */
                zone->zone_start_pfn = 0;
                zone->spanned_pages = 0;
        }

        zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        enum zone_type zid = zone_idx(zone);
        int nid = zone->zone_pgdat->node_id;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

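/*
 * move_pfn_range_left()/move_pfn_range_right() below shift a pfn range
 * between two adjacent zones of the same node: online_pages() uses them
 * to pull a range out of ZONE_MOVABLE into the next lower zone when it
 * is onlined with ONLINE_KERNEL, or to push it from the neighbouring
 * zone into ZONE_MOVABLE for ONLINE_MOVABLE. The range must sit at the
 * facing edge of the donor zone so that both zones stay contiguous.
 */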
static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
                unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;
        unsigned long flags;
        unsigned long z1_start_pfn;

        if (!z1->wait_table) {
                ret = init_currently_empty_zone(z1, start_pfn,
                        end_pfn - start_pfn, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }

        pgdat_resize_lock(z1->zone_pgdat, &flags);

        /* can't move pfns which are higher than @z2 */
        if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
                goto out_fail;
        /* the moved-out part must be at the leftmost of @z2 */
        if (start_pfn > z2->zone_start_pfn)
                goto out_fail;
        /* must include/overlap */
        if (end_pfn <= z2->zone_start_pfn)
                goto out_fail;

        /* use start_pfn for z1's start_pfn if z1 is empty */
        if (z1->spanned_pages)
                z1_start_pfn = z1->zone_start_pfn;
        else
                z1_start_pfn = start_pfn;

        resize_zone(z1, z1_start_pfn, end_pfn);
        resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);

        pgdat_resize_unlock(z1->zone_pgdat, &flags);

        fix_zone_id(z1, start_pfn, end_pfn);

        return 0;
out_fail:
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
        return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
                unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;
        unsigned long flags;
        unsigned long z2_end_pfn;

        if (!z2->wait_table) {
                ret = init_currently_empty_zone(z2, start_pfn,
                        end_pfn - start_pfn, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }

        pgdat_resize_lock(z1->zone_pgdat, &flags);

        /* can't move pfns which are lower than @z1 */
        if (z1->zone_start_pfn > start_pfn)
                goto out_fail;
        /* the moved-out part must be at the rightmost of @z1 */
        if (z1->zone_start_pfn + z1->spanned_pages > end_pfn)
                goto out_fail;
        /* must include/overlap */
        if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
                goto out_fail;

        /* use end_pfn for z2's end_pfn if z2 is empty */
        if (z2->spanned_pages)
                z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
        else
                z2_end_pfn = end_pfn;

        resize_zone(z1, z1->zone_start_pfn, start_pfn);
        resize_zone(z2, start_pfn, z2_end_pfn);

        pgdat_resize_unlock(z1->zone_pgdat, &flags);

        fix_zone_id(z2, start_pfn, end_pfn);

        return 0;
out_fail:
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
        return -1;
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                            unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
        unsigned long flags;

        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret;

                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
                        phys_start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
                                   unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        /*
         * XXX: Freeing the memmap with vmemmap is not implemented yet.
         * This should be removed later.
         */
        return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        unsigned long flags;
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        pgdat_resize_lock(pgdat, &flags);
        sparse_remove_one_section(zone, ms);
        pgdat_resize_unlock(pgdat, &flags);
        return 0;
}
#endif

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
                      unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        /* while initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

                /*
                 * EEXIST is finally dealt with by the ioresource collision
                 * check. See add_memory() => register_memory_resource().
                 * A warning will be printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
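
/*
 * Worked example (illustrative; the figures assume x86_64 with
 * SECTION_SIZE_BITS == 27, i.e. 128MB sections of 32768 4K pages):
 * hot-adding 256MB starting at pfn 0x1000000 gives start_sec == 0x200
 * and end_sec == 0x201, so __add_section() is called twice, once per
 * section, with the section-aligned pfn (i << PFN_SECTION_SHIFT).
 */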

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                   unsigned long nr_pages)
{
        unsigned long i, ret = 0;
        int sections_to_remove;

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
                ret = __remove_section(zone, __pfn_to_section(pfn));
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        lock_memory_hotplug();

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        unlock_memory_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        lock_memory_hotplug();

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        unlock_memory_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        if (pfn >= num_physpages)
                num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
        totalram_pages++;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
                totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
        __online_page_set_limits(page);
        __online_page_increment_counters(page);
        __online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                              void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;
        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * With CONFIG_MOVABLE_NODE, we allow onlining a node that does not have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
        return true;
}
#else /* #ifdef CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
        return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* #ifdef CONFIG_MOVABLE_NODE */

/* check which states of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
        struct zone *zone, struct memory_notify *arg)
{
        int nid = zone_to_nid(zone);
        enum zone_type zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL,
         * set zone_last to ZONE_NORMAL.
         *
         * If we don't have HIGHMEM nor movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * If the memory to be onlined is in a zone of 0...zone_last, and
         * the zones of 0...zone_last don't have memory before onlining, we
         * will need to set the node in node_states[N_NORMAL_MEMORY] after
         * the memory is onlined.
         */
        if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
                arg->status_change_nid_normal = nid;
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM,
         * set zone_last to ZONE_HIGHMEM.
         *
         * If we don't have movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE,
         * set zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
                arg->status_change_nid_high = nid;
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * If the node doesn't have memory before onlining, we will need to
         * set the node in node_states[N_MEMORY] after the memory
         * is onlined.
         */
        if (!node_state(nid, N_MEMORY))
                arg->status_change_nid = nid;
        else
                arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_set_state(node, N_NORMAL_MEMORY);

        if (arg->status_change_nid_high >= 0)
                node_set_state(node, N_HIGH_MEMORY);

        node_set_state(node, N_MEMORY);
}


int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;

        lock_memory_hotplug();
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_mutex.
         */
        zone = page_zone(pfn_to_page(pfn));

        if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
            !can_online_high_movable(zone)) {
                unlock_memory_hotplug();
                return -1;
        }

        if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
                if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
                        unlock_memory_hotplug();
                        return -1;
                }
        }
        if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
                if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
                        unlock_memory_hotplug();
                        return -1;
                }
        }

        /* The previous code may have changed the zone of the pfn range */
        zone = page_zone(pfn_to_page(pfn));

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_online(nr_pages, zone, &arg);

        nid = page_to_nid(pfn_to_page(pfn));

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret) {
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
        }
        /*
         * If this zone is not populated, then it is not in the zonelist.
         * This means the page allocator ignores this zone.
         * So, the zonelist must be updated after onlining.
         */
        mutex_lock(&zonelists_mutex);
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                build_all_zonelists(NULL, zone);
        }

        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                mutex_unlock(&zonelists_mutex);
                printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
                       (unsigned long long) pfn << PAGE_SHIFT,
                       (((unsigned long long) pfn + nr_pages)
                        << PAGE_SHIFT) - 1);
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
        }

        zone->managed_pages += onlined_pages;
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;
        if (onlined_pages) {
                node_states_set_node(zone_to_nid(zone), &arg);
                if (need_zonelists_rebuild)
                        build_all_zonelists(NULL, NULL);
                else
                        zone_pcp_update(zone);
        }

        mutex_unlock(&zonelists_mutex);

        init_per_zone_wmark_min();

        if (onlined_pages)
                kswapd_run(zone_to_nid(zone));

        vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);
        unlock_memory_hotplug();

        return 0;
}
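
/*
 * Usage note: online_pages() is typically reached when userspace writes
 * "online", "online_kernel" or "online_movable" to
 * /sys/devices/system/memory/memoryN/state; the memory block layer picks
 * the matching online_type. The function then raises MEM_GOING_ONLINE,
 * hands every page in the range to online_page_callback, updates the
 * zone/node counters and zonelists, and finally raises MEM_ONLINE.
 */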
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);

        /*
         * The node we allocated has no zone fallback lists. To avoid
         * accessing a not-initialized zonelist, build one here.
         */
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
        return;
}


/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
        pg_data_t *pgdat;
        int ret;

        lock_memory_hotplug();
        pgdat = hotadd_new_pgdat(nid, 0);
        if (!pgdat) {
                ret = -ENOMEM;
                goto out;
        }
        node_set_online(nid);
        ret = register_one_node(nid);
        BUG_ON(ret);

out:
        unlock_memory_hotplug();
        return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        lock_memory_hotplug();

        res = register_memory_resource(start, size);
        ret = -EEXIST;
        if (!res)
                goto out;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
                        goto error;
                new_pgdat = 1;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);

        if (ret < 0)
                goto error;

        /* we online the node here. we can't roll back from here. */
        node_set_online(nid);

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file of the new node can't be created, cpus
                 * on the node can't be hot-added. There is no rollback way
                 * now, so check by BUG_ON() to catch it reluctantly..
                 */
                BUG_ON(ret);
        }

        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        goto out;

error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

out:
        unlock_memory_hotplug();
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
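
/*
 * Call-flow sketch (illustrative): a platform driver such as the ACPI
 * memory hotplug driver discovers a new range and calls
 * add_memory(nid, start, size). This reserves the "System RAM" iomem
 * resource, allocates a pgdat if the node was offline, and lets
 * arch_add_memory() -> __add_pages() create the sections and their
 * sysfs memory blocks; the pages only become usable once they are
 * onlined via online_pages() afterwards.
 */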

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* If the entire pageblock is free, move to the end of the free page */
        if (pageblock_free(page)) {
                int order;
                /* be careful. we don't have locks, page_order can be changed.*/
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
                        return page + (1 << order);
        }

        return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                if (!is_pageblock_removable_nolock(page))
                        return 0;
                cond_resched();
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += MAX_ORDER_NR_PAGES) {
                i = 0;
                /* This is just a CONFIG_HOLES_IN_ZONE check. */
                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
                        i++;
                if (i == MAX_ORDER_NR_PAGES)
                        continue;
                page = pfn_to_page(pfn + i);
                if (zone && page_zone(page) != zone)
                        return 0;
                zone = page_zone(page);
        }
        return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and find the first LRU page.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                }
        }
        return 0;
}

#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                if (!get_page_unless_zero(page))
                        continue;
                /*
                 * We can skip free pages. And we can only deal with pages on
                 * LRU.
                 */
                ret = isolate_lru_page(page);
                if (!ret) { /* Success */
                        put_page(page);
                        list_add_tail(&page->lru, &source);
                        move_pages--;
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));

                } else {
#ifdef CONFIG_DEBUG_VM
                        printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
                               pfn);
                        dump_page(page);
#endif
                        put_page(page);
                        /* Because we don't hold the big zone->lock, we should
                           check this again here. */
                        if (page_count(page)) {
                                not_managed++;
                                ret = -EBUSY;
                                break;
                        }
                }
        }
        if (!list_empty(&source)) {
                if (not_managed) {
                        putback_lru_pages(&source);
                        goto out;
                }

                /*
                 * alloc_migrate_target should be improooooved!!
                 * migrate_pages returns # of failed pages.
                 */
                ret = migrate_pages(&source, alloc_migrate_target, 0,
                                    true, MIGRATE_SYNC,
                                    MR_MEMORY_HOTPLUG);
                if (ret)
                        putback_lru_pages(&source);
        }
out:
        return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                          void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                              offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
                                    check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * With CONFIG_MOVABLE_NODE, we allow an online node that does not have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
        return true;
}
#else /* #ifdef CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt;

        for (zt = 0; zt <= ZONE_NORMAL; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;

        if (present_pages > nr_pages)
                return true;

        present_pages = 0;
        for (; zt <= ZONE_MOVABLE; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;

        /*
         * we can't offline the last normal memory until all
         * higher memory is offlined.
         */
        return present_pages == 0;
}
#endif /* #ifdef CONFIG_MOVABLE_NODE */

/* check which states of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
                struct zone *zone, struct memory_notify *arg)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt, zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL,
         * set zone_last to ZONE_NORMAL.
         *
         * If we don't have HIGHMEM nor movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * check whether node_states[N_NORMAL_MEMORY] will be changed.
         * If the memory to be offlined is in a zone of 0...zone_last,
         * and it is the last present memory, 0...zone_last will
         * become empty after offlining, thus we can determine that we will
         * need to clear the node from node_states[N_NORMAL_MEMORY].
         */
        for (zt = 0; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_normal = zone_to_nid(zone);
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM,
         * set zone_last to ZONE_HIGHMEM.
         *
         * If we don't have movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE,
         * set zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_high = zone_to_nid(zone);
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
         */
        zone_last = ZONE_MOVABLE;

        /*
         * check whether node_states[N_HIGH_MEMORY] will be changed.
         * If we try to offline the last present @nr_pages from the node,
         * we can determine that we will need to clear the node from
         * node_states[N_HIGH_MEMORY].
         */
        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (nr_pages >= present_pages)
                arg->status_change_nid = zone_to_nid(zone);
        else
                arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_clear_state(node, N_NORMAL_MEMORY);

        if ((N_MEMORY != N_NORMAL_MEMORY) &&
            (arg->status_change_nid_high >= 0))
                node_clear_state(node, N_HIGH_MEMORY);

        if ((N_MEMORY != N_HIGH_MEMORY) &&
            (arg->status_change_nid >= 0))
                node_clear_state(node, N_MEMORY);
}

static int __ref __offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        struct zone *zone;
        struct memory_notify arg;

        BUG_ON(start_pfn >= end_pfn);
        /* at least, alignment against pageblock is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /*
         * This makes hotplug much easier... and readable.
         * We assume this for now.
         */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        lock_memory_hotplug();

        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        ret = -EINVAL;
        if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
                goto out;

        /* set the above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, true);
        if (ret)
                goto out;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_offline(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                cond_resched();
                drain_all_pages();
        }

        pfn = scan_lru_pages(start_pfn, end_pfn);
        if (pfn) { /* We have a page on the LRU */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all zones' lru pagevecs, this is asynchronous... */
        lru_add_drain_all();
        yield();
        /* drain pcp pages, this is synchronous. */
        drain_all_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /*
         * Ok, all of our target is isolated.
         * We cannot do rollback at this point.
         */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and make the migrate type MOVABLE again */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        zone->managed_pages -= offlined_pages;
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;

        init_per_zone_wmark_min();

        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        } else
                zone_pcp_update(zone);

        node_states_clear_node(node, &arg);
        if (arg.status_change_nid >= 0)
                kswapd_stop(node);

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        unlock_memory_hotplug();
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
               (unsigned long long) start_pfn << PAGE_SHIFT,
               ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
        unlock_memory_hotplug();
        return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
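
/*
 * Usage note: offlining is typically driven by writing "offline" to
 * /sys/devices/system/memory/memoryN/state, which ends up in
 * offline_pages() above with a 120 second timeout. __offline_pages()
 * isolates the pageblocks, migrates LRU pages out of the range with
 * do_migrate_range(), and only commits (adjusting zone/node counters and
 * notifying MEM_OFFLINE) once every page in the range is verified
 * isolated; otherwise it rolls back with MEM_CANCEL_OFFLINE.
 */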

int remove_memory(u64 start, u64 size)
{
        struct memory_block *mem = NULL;
        struct mem_section *section;
        unsigned long start_pfn, end_pfn;
        unsigned long pfn, section_nr;
        int ret;

        start_pfn = PFN_DOWN(start);
        end_pfn = start_pfn + PFN_DOWN(size);

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                section_nr = pfn_to_section_nr(pfn);
                if (!present_section_nr(section_nr))
                        continue;

                section = __nr_to_section(section_nr);
                /* same memblock? */
                if (mem)
                        if ((section_nr >= mem->start_section_nr) &&
                            (section_nr <= mem->end_section_nr))
                                continue;

                mem = find_memory_block_hinted(section, mem);
                if (!mem)
                        continue;

                ret = offline_memory_block(mem);
                if (ret) {
                        kobject_put(&mem->dev.kobj);
                        return ret;
                }
        }

        if (mem)
                kobject_put(&mem->dev.kobj);

        return 0;
}
#else
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return -EINVAL;
}
int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);