/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

#include "internal.h"

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 * -EBUSY: page not on LRU list
 * 0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
        int ret = -EBUSY;

        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);

                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page)) {
                        ret = 0;
                        get_page(page);
                        ClearPageLRU(page);
                        if (PageActive(page))
                                del_page_from_active_list(zone, page);
                        else
                                del_page_from_inactive_list(zone, page);
                        list_add_tail(&page->lru, pagelist);
                }
                spin_unlock_irq(&zone->lru_lock);
        }
        return ret;
}

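/*
 * Sketch (hypothetical helper, not part of this file): gathering up to
 * MIGRATE_CHUNK_SIZE pages from an array onto a private list. Pages that
 * are not on the LRU are simply skipped; each isolated page is returned
 * with an elevated reference count, as described above.
 */
static int example_gather(struct page **pages, int nr,
                struct list_head *pagelist)
{
        int i, taken = 0;

        for (i = 0; i < nr && taken < MIGRATE_CHUNK_SIZE; i++)
                if (isolate_lru_page(pages[i], pagelist) == 0)
                        taken++;
        return taken;
}
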
/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
        /* Must have swap device for migration */
        if (nr_swap_pages <= 0)
                return -ENODEV;

        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

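/*
 * Sketch (hypothetical caller, not part of this file): the call order
 * documented above - isolate pages first, then migrate_prep(), then
 * migrate_pages(). example_migrate_list() and its arguments are
 * illustrative only.
 */
static int example_migrate_list(struct list_head *pagelist,
                struct list_head *new, struct list_head *moved,
                struct list_head *failed)
{
        int rc = migrate_prep();

        if (rc)
                return rc;      /* -ENODEV: no swap space for migration */
        return migrate_pages(pagelist, new, moved, failed);
}
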
static inline void move_to_lru(struct page *page)
{
        list_del(&page->lru);
        if (PageActive(page)) {
                /*
                 * lru_cache_add_active checks that
                 * the PG_active bit is off.
                 */
                ClearPageActive(page);
                lru_cache_add_active(page);
        } else {
                lru_cache_add(page);
        }
        put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                move_to_lru(page);
                count++;
        }
        return count;
}

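/*
 * Sketch (hypothetical): the common error path. Anything still sitting
 * on the isolated list, including pages that migrate_pages() spliced
 * back as permanent failures, must be returned to the LRU, or those
 * pages would be lost to the VM.
 */
static void example_abort_migration(struct list_head *pagelist)
{
        if (!list_empty(pagelist))
                putback_lru_pages(pagelist);
}
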
/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (page_mapped(page) && mapping)
                if (try_to_unmap(page, 1) != SWAP_SUCCESS)
                        goto unlock_retry;

        if (PageDirty(page)) {
                /* Page is dirty, try to write it out here */
                switch (pageout(page, mapping)) {
                case PAGE_KEEP:
                case PAGE_ACTIVATE:
                        goto unlock_retry;

                case PAGE_SUCCESS:
                        goto retry;

                case PAGE_CLEAN:
                        ; /* try to free the page below */
                }
        }

        if (PagePrivate(page)) {
                if (!try_to_release_page(page, GFP_KERNEL) ||
                    (!mapping && page_count(page) == 1))
                        goto unlock_retry;
        }

        if (remove_mapping(mapping, page)) {
                /* Success */
                unlock_page(page);
                return 0;
        }

unlock_retry:
        unlock_page(page);

retry:
        return -EAGAIN;
}

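/*
 * Sketch (hypothetical, illustrative only): swap_page() reports -EAGAIN
 * for every transient failure and returns with the page unlocked, so a
 * caller retries by locking the page again. A bounded loop such as the
 * one below mirrors what migrate_pages() does with its pass counter.
 */
static int example_swap_retry(struct page *page, int max_tries)
{
        int i, rc = -EAGAIN;

        for (i = 0; i < max_tries && rc == -EAGAIN; i++) {
                lock_page(page);
                rc = swap_page(page);   /* returns with the page unlocked */
        }
        return rc;
}
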
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct page **radix_pointer;

        write_lock_irq(&mapping->tree_lock);

        radix_pointer = (struct page **)radix_tree_lookup_slot(
                                                &mapping->page_tree,
                                                page_index(page));

        if (!page_mapping(page) ||
                        page_count(page) != 2 + !!PagePrivate(page) ||
                        *radix_pointer != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }

        *radix_pointer = newpage;
        __put_page(page);
        write_unlock_irq(&mapping->tree_lock);

        return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (PageActive(page))
                SetPageActive(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                set_page_dirty(newpage);
        }

        ClearPageSwapCache(page);
        ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page->mapping = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}

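/*
 * Sketch (hypothetical): for a mapping without private page state, a
 * migratepage implementation is exactly this pairing of the two helpers
 * above; migrate_page() below adds the writeback check and the swap pte
 * cleanup on top of it.
 */
static int example_migratepage(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc) /* raced: reference count or mapping changed under us */
                return rc;
        migrate_page_copy(newpage, page);
        return 0;
}
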
/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

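/*
 * Sketch (hypothetical filesystem): a mapping whose pages must never be
 * moved, for instance because hardware holds their physical addresses,
 * wires fail_migrate_page into its address_space_operations. The name
 * example_unmovable_aops is illustrative only; a real filesystem would
 * fill in its other methods as well.
 */
static const struct address_space_operations example_unmovable_aops = {
        .migratepage    = fail_migrate_page,
};
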
/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);

        /*
         * Remove auxiliary swap entries and replace
         * them with real ptes.
         *
         * Note that a real pte entry will allow processes that are not
         * waiting on the page lock to use the new page via the page tables
         * before the new page is unlocked.
         */
        remove_from_swap(newpage);
        return 0;
}
EXPORT_SYMBOL(migrate_page);

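/*
 * Sketch (hypothetical caller): both pages must be locked and writeback
 * on the old page must have completed before migrate_page() is called;
 * migrate_pages() below takes care of all this before it calls into the
 * mapping's migratepage method.
 */
static int example_call_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        lock_page(page);
        wait_on_page_writeback(page);
        lock_page(newpage);
        rc = migrate_page(mapping, newpage, page);
        unlock_page(newpage);
        unlock_page(page);
        return rc;
}
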
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);

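/*
 * Sketch (hypothetical block-backed filesystem): mappings whose pages
 * carry buffer_heads select buffer_migrate_page() so the buffers are
 * transferred to the new page along with the data. example_blkdev_aops
 * is illustrative only.
 */
static const struct address_space_operations example_blkdev_aops = {
        .migratepage    = buffer_migrate_page,
};
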
/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
                struct list_head *moved, struct list_head *failed)
{
        int retry;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

redo:
        retry = 0;

        list_for_each_entry_safe(page, page2, from, lru) {
                struct page *newpage = NULL;
                struct address_space *mapping;

                cond_resched();

                rc = 0;
                if (page_count(page) == 1)
                        /* page was freed from under us. So we are done. */
                        goto next;

                if (to && list_empty(to))
                        break;

                /*
                 * Skip locked pages during the first three passes to give the
                 * functions holding the lock time to release the page. Later we
                 * use lock_page() to have a higher chance of acquiring the
                 * lock.
                 */
                rc = -EAGAIN;
                if (pass > 2)
                        lock_page(page);
                else
                        if (TestSetPageLocked(page))
                                goto next;

                /*
                 * Only wait on writeback if we have already done a pass where
                 * we may have triggered writeouts for lots of pages.
                 */
                if (pass > 0) {
                        wait_on_page_writeback(page);
                } else {
                        if (PageWriteback(page))
                                goto unlock_page;
                }

                /*
                 * Anonymous pages must have swap cache references otherwise
                 * the information contained in the page maps cannot be
                 * preserved.
                 */
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!add_to_swap(page, GFP_KERNEL)) {
                                rc = -ENOMEM;
                                goto unlock_page;
                        }
                }

                if (!to) {
                        rc = swap_page(page);
                        goto next;
                }

                /*
                 * Establish swap ptes for anonymous pages or destroy pte
                 * maps for files.
                 *
                 * In order to reestablish file backed mappings the fault handlers
                 * will take the radix tree_lock which may then be used to stop
                 * processes from accessing this page until the new page is ready.
                 *
                 * A process accessing via a swap pte (an anonymous page) will take a
                 * page_lock on the old page which will block the process until the
                 * migration attempt is complete. At that time the PageSwapCache bit
                 * will be examined. If the page was migrated then the PageSwapCache
                 * bit will be clear and the operation to retrieve the page will be
                 * retried which will find the new page in the radix tree. Then a new
                 * direct mapping may be generated based on the radix tree contents.
                 *
                 * If the page was not migrated then the PageSwapCache bit
                 * is still set and the operation may continue.
                 */
                rc = -EPERM;
                if (try_to_unmap(page, 1) == SWAP_FAIL)
                        /* A vma has VM_LOCKED set -> permanent failure */
                        goto unlock_page;

                rc = -EAGAIN;
                if (page_mapped(page))
                        goto unlock_page;

                newpage = lru_to_page(to);
                lock_page(newpage);
                /* Prepare mapping for the new page. */
                newpage->index = page->index;
                newpage->mapping = page->mapping;

                /*
                 * Pages are properly locked and writeback is complete.
                 * Try to migrate the page.
                 */
                mapping = page_mapping(page);
                if (!mapping)
                        goto unlock_both;

                if (mapping->a_ops->migratepage) {
                        /*
                         * Most pages have a mapping and most filesystems
                         * should provide a migration function. Anonymous
                         * pages are part of swap space which also has its
                         * own migration function. This is the most common
                         * path for page migration.
                         */
                        rc = mapping->a_ops->migratepage(mapping,
                                                        newpage, page);
                        goto unlock_both;
                }

                /*
                 * Default handling if a filesystem does not provide
                 * a migration function. We can only migrate clean
                 * pages so try to write out any dirty pages first.
                 */
                if (PageDirty(page)) {
                        switch (pageout(page, mapping)) {
                        case PAGE_KEEP:
                        case PAGE_ACTIVATE:
                                goto unlock_both;

                        case PAGE_SUCCESS:
                                unlock_page(newpage);
                                goto next;

                        case PAGE_CLEAN:
                                ; /* try to migrate the page below */
                        }
                }

                /*
                 * Buffers are managed in a filesystem specific way.
                 * We must have no buffers or drop them.
                 */
                if (!page_has_buffers(page) ||
                    try_to_release_page(page, GFP_KERNEL)) {
                        rc = migrate_page(mapping, newpage, page);
                        goto unlock_both;
                }

                /*
                 * On early passes with mapped pages simply
                 * retry. There may be a lock held for some
                 * buffers that may go away. Later
                 * swap them out.
                 */
                if (pass > 4) {
                        /*
                         * Persistently unable to drop buffers. As a
                         * measure of last resort we fall back to
                         * swap_page().
                         */
                        unlock_page(newpage);
                        newpage = NULL;
                        rc = swap_page(page);
                        goto next;
                }

unlock_both:
                unlock_page(newpage);

unlock_page:
                unlock_page(page);

next:
                if (rc) {
                        if (newpage)
                                newpage->mapping = NULL;

                        if (rc == -EAGAIN)
                                retry++;
                        else {
                                /* Permanent failure */
                                list_move(&page->lru, failed);
                                nr_failed++;
                        }
                } else {
                        if (newpage) {
                                /* Successful migration. Return page to LRU */
                                move_to_lru(newpage);
                        }
                        list_move(&page->lru, moved);
                }
        }
        if (retry && pass++ < 10)
                goto redo;

        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        return nr_failed + retry;
}

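/*
 * Sketch (hypothetical): with a NULL "to" list, migrate_pages() degrades
 * into a targeted swap-out of the isolated pages, which is how a caller
 * can push a specific set of pages out to swap. This mirrors the list
 * handling of migrate_pages_to() below.
 */
static int example_swap_out_list(struct list_head *pagelist)
{
        LIST_HEAD(moved);
        LIST_HEAD(failed);
        int nr_failed;

        nr_failed = migrate_pages(pagelist, NULL, &moved, &failed);
        putback_lru_pages(&moved);
        list_splice(&failed, pagelist); /* hand failures back to the caller */
        return nr_failed;
}
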
/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify the destination with either a non-NULL vma or dest >= 0.
 * Return the number of pages not migrated or an error code.
 */
int migrate_pages_to(struct list_head *pagelist,
                struct vm_area_struct *vma, int dest)
{
        LIST_HEAD(newlist);
        LIST_HEAD(moved);
        LIST_HEAD(failed);
        int err = 0;
        unsigned long offset = 0;
        int nr_pages;
        struct page *page;
        struct list_head *p;

redo:
        nr_pages = 0;
        list_for_each(p, pagelist) {
                if (vma) {
                        /*
                         * The address passed to alloc_page_vma is used to
                         * generate the proper interleave behavior. We fake
                         * the address here by an increasing offset in order
                         * to get the proper distribution of pages.
                         *
                         * No decision has been made as to which page
                         * a certain old page is moved to so we cannot
                         * specify the correct address.
                         */
                        page = alloc_page_vma(GFP_HIGHUSER, vma,
                                        offset + vma->vm_start);
                        offset += PAGE_SIZE;
                } else
                        page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

                if (!page) {
                        err = -ENOMEM;
                        goto out;
                }
                list_add_tail(&page->lru, &newlist);
                nr_pages++;
                if (nr_pages > MIGRATE_CHUNK_SIZE)
                        break;
        }
        err = migrate_pages(pagelist, &newlist, &moved, &failed);

        putback_lru_pages(&moved);      /* Call release pages instead ?? */

        if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
                goto redo;
out:
        /* Return leftover allocated pages */
        while (!list_empty(&newlist)) {
                page = list_entry(newlist.next, struct page, lru);
                list_del(&page->lru);
                __free_page(page);
        }
        list_splice(&failed, pagelist);
        if (err < 0)
                return err;

        /* Calculate number of leftover pages */
        nr_pages = 0;
        list_for_each(p, pagelist)
                nr_pages++;
        return nr_pages;
}
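
/*
 * Sketch (hypothetical end-to-end use, not part of this file): move a
 * single page to a given NUMA node. target_nid is an assumed parameter;
 * migrate_prep() is called per the comment above it, and anything left
 * on the list after the attempt is returned to the LRU.
 */
static int example_move_page_to_node(struct page *page, int target_nid)
{
        LIST_HEAD(pagelist);
        int rc;

        if (isolate_lru_page(page, &pagelist))
                return -EBUSY;  /* page was not on the LRU */
        rc = migrate_prep();
        if (rc == 0)
                rc = migrate_pages_to(&pagelist, NULL, target_nid);
        /* Failed pages were spliced back onto pagelist; put them back. */
        putback_lru_pages(&pagelist);
        return rc;
}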