/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (TestClearPageLRU(page)) {
			ret = 0;
			get_page(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}
/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}
/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	inc_mm_counter(mm, anon_rss);
	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	set_pte_at(mm, addr, ptep, pte);
	page_add_anon_rmap(new, vma, addr);
out:
	pte_unmap_unlock(ptep, ptl);
}
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 *
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, page_address_in_vma(new, vma),
					old, new);

	spin_unlock(&anon_vma->lock);
}
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	get_page(page);
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct page **radix_pointer;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) ||
			page_count(page) != 2 + !!PagePrivate(page) ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* new page takes over the mapping's reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);	/* drop the mapping's reference on the old page */
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
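
/*
 * Illustrative sketch, not part of the original file: the reference count
 * rule from the comment above as a helper. One reference is held by the
 * migration caller (isolate_lru_page), one by the mapping's radix tree slot,
 * and one more by the buffers when PagePrivate is set.
 */
static inline int sketch_expected_page_refs(struct page *page)
{
	if (!page_mapping(page))
		return 1;			/* anonymous page, not in any mapping */
	return 2 + !!PagePrivate(page);		/* caller + mapping (+ private data) */
}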
/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);
/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
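
/*
 * Illustrative sketch, not part of the original file: a filesystem whose
 * pages carry no private buffer state can wire migrate_page() straight into
 * its address_space_operations; this is the ->migratepage hook consulted by
 * migrate_pages() below. The structure instance itself is hypothetical.
 */
static struct address_space_operations sketch_simple_aops = {
	.migratepage	= migrate_page,	/* no PagePrivate data to move */
};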
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	/*
	 * Default handling if a filesystem does not provide
	 * a migration function. We can only migrate clean
	 * pages so try to write out any dirty pages first.
	 */
	if (PageDirty(page)) {
		switch (pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			return -EAGAIN;

		case PAGE_SUCCESS:
			/* Relock since we lost the lock */
			lock_page(page);
			/* Must retry since page state may have changed */
			return -EAGAIN;

		case PAGE_CLEAN:
			; /* try to migrate the page below */
		}
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_buffers(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}
/*
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * from the LRU can be moved to.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0)
			wait_on_page_writeback(page);
		else
			if (PageWriteback(page))
				goto unlock_page;

		/*
		 * Establish swap ptes for anonymous pages or destroy pte
		 * entries for file backed pages.
		 *
		 * In order to reestablish file backed mappings the fault handlers
		 * will take the radix tree_lock which may then be used to stop
		 * processes from accessing this page until the new page is ready.
		 *
		 * A process accessing via a swap pte (an anonymous page) will take a
		 * page_lock on the old page which will block the process until the
		 * migration attempt is complete. At that time the PageSwapCache bit
		 * will be examined. If the page was migrated then the PageSwapCache
		 * bit will be clear and the operation to retrieve the page will be
		 * retried which will find the new page in the radix tree. Then a new
		 * direct mapping may be generated based on the radix tree contents.
		 *
		 * If the page was not migrated then the PageSwapCache bit
		 * is still set and the operation may continue.
		 */
		rc = -EPERM;
		if (try_to_unmap(page, 1) == SWAP_FAIL)
			/* A vma has VM_LOCKED set -> permanent failure */
			goto unlock_page;

		rc = -EAGAIN;
		if (page_mapped(page))
			goto unlock_page;

		newpage = lru_to_page(to);
		lock_page(newpage);
		/* Prepare mapping for the new page.*/
		newpage->index = page->index;
		newpage->mapping = page->mapping;

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping,
							newpage, page);
		else
			rc = fallback_migrate_page(mapping, newpage, page);

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc) {
			/* Undo the mapping set up above for the unused new page */
			if (newpage)
				newpage->mapping = NULL;

			if (rc == -EAGAIN)
				retry++;
			else {
				/* Permanent failure */
				list_move(&page->lru, failed);
				nr_failed++;
			}
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
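
/*
 * Illustrative sketch, not part of the original file: the calling sequence
 * described above. A caller drains the per-cpu pagevecs with migrate_prep(),
 * collects pages with isolate_lru_page(), hands them to migrate_pages()
 * together with preallocated target pages, and finally returns everything
 * to the LRU. The parameters and the error handling are simplified
 * assumptions; migrate_pages_to() below is the real in-tree caller.
 */
static inline int sketch_migrate_list(struct page **pages, int nr,
				struct list_head *new_pages)
{
	LIST_HEAD(pagelist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int i, err;

	migrate_prep();
	for (i = 0; i < nr; i++)
		/* Pages not on the LRU are simply skipped (-EBUSY). */
		isolate_lru_page(pages[i], &pagelist);

	err = migrate_pages(&pagelist, new_pages, &moved, &failed);

	/*
	 * Migrated old pages land on "moved", permanent failures on "failed",
	 * and pages still busy after all retries remain on "pagelist". This
	 * sketch simply puts them all back; a real caller may want to report
	 * the leftovers instead.
	 */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);
	putback_lru_pages(&pagelist);
	return err;
}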
/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify destination with either non-NULL vma or dest_node >= 0
 * Return the number of pages not migrated or error code
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		}
		else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}
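
/*
 * Illustrative sketch, not part of the original file: the two ways of
 * specifying a destination that the comment above describes. "target_node"
 * and "vma" are hypothetical caller state.
 */
static inline int sketch_move_pagelist(struct list_head *pagelist,
				struct vm_area_struct *vma, int target_node)
{
	if (vma)
		/* Allocate replacement pages according to the vma's policy. */
		return migrate_pages_to(pagelist, vma, -1);

	/* Otherwise allocate every replacement page on one explicit node. */
	return migrate_pages_to(pagelist, NULL, target_node);
}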
)