* to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
* undesirable, use migrate_prep_local()
*/
-int migrate_prep(void)
+void migrate_prep(void)
{
/*
* Clear the LRU lists so pages can be isolated.
* Note that pages may be moved off the LRU after we have
* drained them. Those pages will fail to migrate like other
* pages that may be busy.
*/
lru_add_drain_all();
-
- return 0;
}
/* Do the necessary work of migrate_prep but not if it involves other CPUs */
-int migrate_prep_local(void)
+void migrate_prep_local(void)
{
lru_add_drain();
-
- return 0;
}
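+/*
+ * Both helpers only ever returned a hard-coded 0, so call sites carried
+ * dead error handling; e.g. do_mbind() in mm/mempolicy.c used to do
+ *
+ *	err = migrate_prep();
+ *	if (err)
+ *		goto mpol_out;
+ *
+ * and can now issue a plain migrate_prep() call instead.
+ */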
static int unmap_and_move(new_page_t get_new_page,
struct page *newpage = NULL;
if (!thp_migration_supported() && PageTransHuge(page))
- return -ENOMEM;
+ return -ENOSYS;
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
return rc;
}
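+/*
+ * Split a THP into base pages. On success the tail pages have been added
+ * to @from (so a later pass will retry them) and the list cursor @page2
+ * is resynced so the walk over @from stays valid.
+ */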
+static inline int try_split_thp(struct page *page, struct page **page2,
+ struct list_head *from)
+{
+ int rc = 0;
+
+ lock_page(page);
+ rc = split_huge_page_to_list(page, from);
+ unlock_page(page);
+ if (!rc)
+ list_safe_reset_next(page, *page2, lru);
+
+ return rc;
+}
+
/*
* migrate_pages - migrate the pages specified in a list, to the free pages
* supplied as the target for the page migration
*
* @from: The list of pages to be migrated.
*/
switch(rc) {
+ /*
+ * THP migration might be unsupported or the
+ * allocation could've failed so we should
+ * retry on the same page with the THP split
+ * to base pages.
+ *
+ * Head page is retried immediately and tail
+ * pages are added to the tail of the list so
+ * we encounter them after the rest of the list
+ * is processed.
+ */
+ case -ENOSYS:
+ /* THP migration is unsupported */
+ if (is_thp) {
+ if (!try_split_thp(page, &page2, from)) {
+ nr_thp_split++;
+ goto retry;
+ }
+
+ nr_thp_failed++;
+ nr_failed += nr_subpages;
+ break;
+ }
+
+ /* Hugetlb migration is unsupported */
+ nr_failed++;
+ break;
case -ENOMEM:
/*
- * THP migration might be unsupported or the
- * allocation could've failed so we should
- * retry on the same page with the THP split
- * to base pages.
- *
- * Head page is retried immediately and tail
- * pages are added to the tail of the list so
- * we encounter them after the rest of the list
- * is processed.
+ * When memory is low, don't bother to try to migrate
+ * other pages, just exit.
*/
if (is_thp) {
- lock_page(page);
- rc = split_huge_page_to_list(page, from);
- unlock_page(page);
- if (!rc) {
- list_safe_reset_next(page, page2, lru);
+ if (!try_split_thp(page, &page2, from)) {
nr_thp_split++;
goto retry;
}
break;
default:
/*
- * Permanent failure (-EBUSY, -ENOSYS, etc.):
+ * Permanent failure (-EBUSY, etc.):
* unlike -EAGAIN case, the failed page is
* removed from migration page list and not
* retried in the next outer loop.
*/
return PageLocked(page);
}
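+/*
+ * Executable file-backed (or shmem) pages mapped by more than one process
+ * are most likely shared libraries, which are not worth migrating for
+ * NUMA locality.
+ */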
+static inline bool is_shared_exec_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ if (page_mapcount(page) != 1 &&
+ (page_is_file_lru(page) || vma_is_shmem(vma)) &&
+ (vma->vm_flags & VM_EXEC))
+ return true;
+
+ return false;
+}
+
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
* the page that will be dropped by this function before returning.
*/
/*
* Don't migrate file pages that are mapped in multiple processes
* with execute permissions as they are probably shared libraries.
*/
- if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
- (vma->vm_flags & VM_EXEC))
+ if (is_shared_exec_page(vma, page))
goto out;
/*
* Also do not migrate dirty pages as not all filesystems can move
* dirty pages in an efficient way.
*/
int page_lru = page_is_file_lru(page);
unsigned long start = address & HPAGE_PMD_MASK;
+ if (is_shared_exec_page(vma, page))
+ goto out;
+
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
HPAGE_PMD_ORDER);
out_unlock:
unlock_page(page);
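+ /* reached directly by the early is_shared_exec_page() bailout */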
+out:
put_page(page);
return 0;
}
* will bump the page reference count. Sadly there is no way to
* differentiate a regular pin from migration wait. Hence to
* avoid 2 racing thread trying to migrate back to CPU to enter
- * infinite loop (one stoping migration because the other is
+ * infinite loop (one stopping migration because the other is
* waiting on pte migration entry). We always return true here.
*
* FIXME proper solution is to rework migration_entry_wait() so
* it does not need to take a reference on page.
*/
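+/* reports its result only through *src: MIGRATE_PFN_MIGRATE set on success, cleared on abort */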
static void migrate_vma_insert_page(struct migrate_vma *migrate,
unsigned long addr,
struct page *page,
- unsigned long *src,
- unsigned long *dst)
+ unsigned long *src)
{
struct vm_area_struct *vma = migrate->vma;
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(&range);
}
migrate_vma_insert_page(migrate, addr, newpage,
- &migrate->src[i],
- &migrate->dst[i]);
+ &migrate->src[i]);
continue;
}