/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
			(unsigned long long)res->start,
			(unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
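/*
 * Pages backing bootmem-allocated data (memmap, usemap, pgdat) are tagged
 * with a "magic" type in page->_mapcount and given an extra reference so
 * they can be recognized and freed again on memory hot-remove.
 */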
static void get_page_bootmem(unsigned long info, struct page *page, int magic)
{
	atomic_set(&page->_mapcount, magic);
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

void put_page_bootmem(struct page *page)
{
	int magic;

	magic = atomic_read(&page->_mapcount);
	BUG_ON(magic >= -1);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		reset_page_mapcount(page);
		__free_pages_bootmem(page, 0);
	}
}

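/*
 * Record bootmem info for one memory section: tag the pages holding the
 * section's memmap and its pageblock usemap so they can be released on
 * hot-remove.
 */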
void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_INFO);
}

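/*
 * Record bootmem info for a whole node: the pgdat itself, each zone's
 * wait table, and every present section in the node's pfn range.
 */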
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

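/* Initialize the zone if it is still empty, then add the new range's memmap. */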
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

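/*
 * Add a single memory section: allocate its memmap via sparsemem, hook it
 * into the zone, and create the corresponding sysfs memory block.
 */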
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

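/*
 * Tear down a single memory section: remove its sysfs entry and release
 * its sparsemem memmap under the pgdat resize lock.
 */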
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* during mem_map initialization, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

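/* Extend the zone's spanned pfn range to cover a newly added range. */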
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

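/* Hand one hot-added page over to the page allocator and update accounting. */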
void online_page(struct page *page)
{
	totalram_pages++;
	num_physpages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif

#ifdef CONFIG_FLATMEM
	max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

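/*
 * walk_memory_resource() callback: online every page in the range and
 * accumulate the number of onlined pages in *arg.
 */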
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

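/*
 * Online a previously hot-added pfn range: grow the zone and pgdat spans,
 * free the pages to the page allocator, and notify registered listeners.
 */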
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	walk_memory_resource(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

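/* Allocate and minimally initialize a pgdat for a node that has no memory yet. */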
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages. */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

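/*
 * Hot-add a memory range to the given node: reserve the iomem resource,
 * create the node's pgdat if needed, and let the architecture map the
 * new range.
 */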
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no way to roll
		 * back now, so catch it with BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the lru list.
 * Scan pfns from start to end and return the first pfn whose page is on
 * the LRU, or 0 if none is found.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page,
			unsigned long private,
			int **x)
{
	/* This should be improooooved!! */
	return alloc_page(GFP_HIGHUSER_PAGECACHE);
}

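/*
 * Migrate the LRU pages in [start_pfn, end_pfn) away, a bounded batch at a
 * time, so the range can be offlined.
 */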
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page, &source);
		if (!ret) { /* Success */
			move_pages--;
		} else {
			/*
			 * Because we don't have a big zone->lock, we should
			 * check this again here.
			 */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
					 " %lx/%d/%lx\n",
				pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns # of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

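/*
 * Offline a pfn range: isolate it, migrate or drain any remaining pages,
 * and finally remove the isolated pages from the free lists.
 */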
int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier... and readable.
	 * We assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs, this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain pcp pages, this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target is isolated.
	 * We cannot do rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	num_physpages -= offlined_pages;

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push back to free area */
	undo_isolate_page_range(start_pfn, end_pfn);

	return ret;
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */