diff --git a/mm/migrate.c b/mm/migrate.c
index 47aa50d263c2e783abdfc7bd1a204709cf432a67..ee5e612b4cd87bcab7b72948bee803d4fb346615 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -62,7 +62,7 @@
  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
  * undesirable, use migrate_prep_local()
  */
-int migrate_prep(void)
+void migrate_prep(void)
 {
        /*
         * Clear the LRU lists so pages can be isolated.
@@ -71,16 +71,12 @@ int migrate_prep(void)
         * pages that may be busy.
         */
        lru_add_drain_all();
-
-       return 0;
 }
 
 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
-int migrate_prep_local(void)
+void migrate_prep_local(void)
 {
        lru_add_drain();
-
-       return 0;
 }
 
 int isolate_movable_page(struct page *page, isolate_mode_t mode)
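
Since both helpers unconditionally returned 0, the return value carried no information, and making them void lets any call site that dutifully checked it collapse to a bare call. A hypothetical before/after sketch of such a caller (not taken from this diff):

	/* before: the result was always 0, so this check was dead code */
	int err = migrate_prep();
	if (err)
		return err;

	/* after: just drain the per-CPU LRU caches and carry on */
	migrate_prep();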
@@ -1175,7 +1171,7 @@ static int unmap_and_move(new_page_t get_new_page,
        struct page *newpage = NULL;
 
        if (!thp_migration_supported() && PageTransHuge(page))
-               return -ENOMEM;
+               return -ENOSYS;
 
        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
@@ -1382,6 +1378,20 @@ out:
        return rc;
 }
 
+static inline int try_split_thp(struct page *page, struct page **page2,
+                               struct list_head *from)
+{
+       int rc = 0;
+
+       lock_page(page);
+       rc = split_huge_page_to_list(page, from);
+       unlock_page(page);
+       if (!rc)
+               list_safe_reset_next(page, *page2, lru);
+
+       return rc;
+}
+
 /*
  * migrate_pages - migrate the pages specified in a list, to the free pages
  *                supplied as the target for the page migration
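
try_split_thp() factors out the lock/split/unlock sequence that previously sat inline in migrate_pages(). The subtle part is list_safe_reset_next(): splitting mutates the *from* list (per the comment in the next hunk, the new tail pages are queued at its end), so the next-entry cursor that list_for_each_entry_safe() prefetched into *page2 must be refreshed before iteration continues. A condensed sketch of the calling pattern, simplified from the loop changed below (arguments elided):

	list_for_each_entry_safe(page, page2, from, lru) {
retry:
		rc = unmap_and_move(/* ... */);
		if (rc == -ENOSYS && is_thp &&
		    !try_split_thp(page, &page2, from))
			goto retry;	/* retry the head page as a base page */
	}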
@@ -1459,24 +1469,40 @@ retry:
                         *                   from list
                         */
                        switch(rc) {
+                       /*
+                        * THP migration might be unsupported or the
+                        * allocation could've failed so we should
+                        * retry on the same page with the THP split
+                        * to base pages.
+                        *
+                        * Head page is retried immediately and tail
+                        * pages are added to the tail of the list so
+                        * we encounter them after the rest of the list
+                        * is processed.
+                        */
+                       case -ENOSYS:
+                               /* THP migration is unsupported */
+                               if (is_thp) {
+                                       if (!try_split_thp(page, &page2, from)) {
+                                               nr_thp_split++;
+                                               goto retry;
+                                       }
+
+                                       nr_thp_failed++;
+                                       nr_failed += nr_subpages;
+                                       break;
+                               }
+
+                               /* Hugetlb migration is unsupported */
+                               nr_failed++;
+                               break;
                        case -ENOMEM:
                                /*
-                                * THP migration might be unsupported or the
-                                * allocation could've failed so we should
-                                * retry on the same page with the THP split
-                                * to base pages.
-                                *
-                                * Head page is retried immediately and tail
-                                * pages are added to the tail of the list so
-                                * we encounter them after the rest of the list
-                                * is processed.
+                                * When memory is low, don't bother to try to migrate
+                                * other pages, just exit.
                                 */
                                if (is_thp) {
-                                       lock_page(page);
-                                       rc = split_huge_page_to_list(page, from);
-                                       unlock_page(page);
-                                       if (!rc) {
-                                               list_safe_reset_next(page, page2, lru);
+                                       if (!try_split_thp(page, &page2, from)) {
                                                nr_thp_split++;
                                                goto retry;
                                        }
@@ -1504,7 +1530,7 @@ retry:
                                break;
                        default:
                                /*
-                                * Permanent failure (-EBUSY, -ENOSYS, etc.):
+                                * Permanent failure (-EBUSY, etc.):
                                 * unlike -EAGAIN case, the failed page is
                                 * removed from migration page list and not
                                 * retried in the next outer loop.
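
The point of switching unmap_and_move() from -ENOMEM to -ENOSYS above is that the two failures now mean different things: -ENOSYS says the page can never be migrated in its current form (THP migration not compiled in, or a hugetlb page), while -ENOMEM says an allocation failed right now. Both cases first try to split a THP and retry, but per the new comment only the -ENOMEM case gives up on the remaining pages, whereas -ENOSYS merely fails the one page and break's to the next list entry. Abridged, the producer side in unmap_and_move() now reads:

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOSYS;	/* permanent for this page: split or skip */

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;	/* transient: stop migrating altogether */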
@@ -2071,6 +2097,17 @@ bool pmd_trans_migrating(pmd_t pmd)
        return PageLocked(page);
 }
 
+static inline bool is_shared_exec_page(struct vm_area_struct *vma,
+                                      struct page *page)
+{
+       if (page_mapcount(page) != 1 &&
+           (page_is_file_lru(page) || vma_is_shmem(vma)) &&
+           (vma->vm_flags & VM_EXEC))
+               return true;
+
+       return false;
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
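
is_shared_exec_page() hoists the "probably a shared library" heuristic out of migrate_misplaced_page() so the transhuge path below can share it, and broadens it slightly on the way: vma_is_shmem() mappings now count as file-backed for this test, so multiply-mapped executable shmem is skipped as well. Both NUMA entry points end up with the same guard:

	/* mapped with VM_EXEC by more than one process: probably a
	 * shared library, so leave the page where it is */
	if (is_shared_exec_page(vma, page))
		goto out;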
@@ -2088,8 +2125,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
         * Don't migrate file pages that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
         */
-       if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
-           (vma->vm_flags & VM_EXEC))
+       if (is_shared_exec_page(vma, page))
                goto out;
 
        /*
@@ -2144,6 +2180,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        int page_lru = page_is_file_lru(page);
        unsigned long start = address & HPAGE_PMD_MASK;
 
+       if (is_shared_exec_page(vma, page))
+               goto out;
+
        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
                HPAGE_PMD_ORDER);
@@ -2255,6 +2294,7 @@ out_fail:
 
 out_unlock:
        unlock_page(page);
+out:
        put_page(page);
        return 0;
 }
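
The new out: label is what makes the early is_shared_exec_page() bailout above safe: that path arrives holding the caller's page reference but without having taken the page lock, so it must skip unlock_page() while still dropping the reference. The resulting exit ladder:

out_unlock:
	unlock_page(page);	/* only for paths that locked the page */
out:
	put_page(page);		/* every path drops the caller's reference */
	return 0;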
@@ -2554,7 +2594,7 @@ static bool migrate_vma_check_page(struct page *page)
                 * will bump the page reference count. Sadly there is no way to
                 * differentiate a regular pin from migration wait. Hence to
                 * avoid 2 racing thread trying to migrate back to CPU to enter
-                * infinite loop (one stoping migration because the other is
+                * infinite loop (one stopping migration because the other is
                 * waiting on pte migration entry). We always return true here.
                 *
                 * FIXME proper solution is to rework migration_entry_wait() so
@@ -2854,8 +2894,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
 static void migrate_vma_insert_page(struct migrate_vma *migrate,
                                    unsigned long addr,
                                    struct page *page,
-                                   unsigned long *src,
-                                   unsigned long *dst)
+                                   unsigned long *src)
 {
        struct vm_area_struct *vma = migrate->vma;
        struct mm_struct *mm = vma->vm_mm;
@@ -3016,8 +3055,7 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                                mmu_notifier_invalidate_range_start(&range);
                        }
                        migrate_vma_insert_page(migrate, addr, newpage,
-                                               &migrate->src[i],
-                                               &migrate->dst[i]);
+                                               &migrate->src[i]);
                        continue;
                }