diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 045cc579f724ecfebfb5668d8325838cfee578d1..8a8b3aa929370433867f218c0f10d7578c94af96 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -445,22 +445,25 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
        if (!transhuge_vma_enabled(vma, vm_flags))
                return false;
 
+       if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
+                               vma->vm_pgoff, HPAGE_PMD_NR))
+               return false;
+
        /* Enabled via shmem mount options or sysfs settings. */
-       if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
-               return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
-                               HPAGE_PMD_NR);
-       }
+       if (shmem_file(vma->vm_file))
+               return shmem_huge_enabled(vma);
 
        /* THP settings require madvise. */
        if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
                return false;
 
-       /* Read-only file mappings need to be aligned for THP to work. */
+       /* Only regular file is valid */
        if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
-           !inode_is_open_for_write(vma->vm_file->f_inode) &&
            (vm_flags & VM_EXEC)) {
-               return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
-                               HPAGE_PMD_NR);
+               struct inode *inode = vma->vm_file->f_inode;
+
+               return !inode_is_open_for_write(inode) &&
+                       S_ISREG(inode->i_mode);
        }
 
        if (!vma->anon_vma || vma->vm_ops)
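
The hunk above hoists the PMD-alignment test out of the shmem and CONFIG_READ_ONLY_THP_FOR_FS branches so it is applied once to every file-backed VMA, the shmem branch then reduces to shmem_huge_enabled(vma), and the read-only-file branch additionally requires S_ISREG(), so special files no longer qualify for collapse. The alignment requirement itself is unchanged: the VMA's start address and its file offset must agree modulo the huge-page size, otherwise no PMD-aligned virtual range maps a PMD-aligned range of the file. Below is a minimal userspace sketch of that test, assuming 4 KiB pages and 2 MiB PMD huge pages as on x86-64 (PAGE_SHIFT = 12, HPAGE_PMD_NR = 512); pmd_aligned() is a hypothetical stand-in for the kernel's IS_ALIGNED() check, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT   12          /* 4 KiB pages (assumed x86-64 default) */
#define HPAGE_PMD_NR 512         /* pages per 2 MiB PMD huge page */

/*
 * Mirrors IS_ALIGNED((vm_start >> PAGE_SHIFT) - vm_pgoff, HPAGE_PMD_NR):
 * true when the virtual address and the file offset are congruent modulo
 * the huge-page size, so a PMD-aligned virtual range maps a PMD-aligned
 * range of the file.
 */
static bool pmd_aligned(unsigned long vm_start, unsigned long vm_pgoff)
{
	return (((vm_start >> PAGE_SHIFT) - vm_pgoff) & (HPAGE_PMD_NR - 1)) == 0;
}

int main(void)
{
	printf("%d\n", pmd_aligned(0x200000, 0));   /* file offset 0 at 2 MiB: aligned */
	printf("%d\n", pmd_aligned(0x200000, 1));   /* file offset 4 KiB there: misaligned */
	return 0;
}
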
@@ -1763,6 +1766,10 @@ static void collapse_file(struct mm_struct *mm,
                                filemap_flush(mapping);
                                result = SCAN_FAIL;
                                goto xa_unlocked;
+                       } else if (PageWriteback(page)) {
+                               xas_unlock_irq(&xas);
+                               result = SCAN_FAIL;
+                               goto xa_unlocked;
                        } else if (trylock_page(page)) {
                                get_page(page);
                                xas_unlock_irq(&xas);
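
The hunk above and the one below harden the non-shmem (CONFIG_READ_ONLY_THP_FOR_FS) path of collapse_file(): the scan loop now gives up with SCAN_FAIL when it finds a page under writeback, and the later PageDirty() check is widened to PageDirty() || PageWriteback(). In either case the page's contents have not yet reached the backing file, and khugepaged only operates on read-only fds, so the collapse is abandoned rather than racing with the flush. A much simplified sketch of the resulting predicate follows; struct file_page and may_collapse() are illustrative only, not kernel source.

#include <stdbool.h>

enum scan_result { SCAN_SUCCEED, SCAN_FAIL };

/* Illustrative page state; stands in for the PageDirty()/PageWriteback() tests. */
struct file_page {
	bool shmem;       /* shmem/tmpfs page vs. READ_ONLY_THP_FOR_FS file page */
	bool dirty;
	bool writeback;
};

/*
 * For read-only-fd (non-shmem) collapse, a page that is dirty or still
 * under writeback has data in flight to the backing file, so it must not
 * be replaced by a freshly built huge page.
 */
static enum scan_result may_collapse(const struct file_page *p)
{
	if (!p->shmem && (p->dirty || p->writeback))
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}
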
@@ -1798,7 +1805,8 @@ static void collapse_file(struct mm_struct *mm,
                        goto out_unlock;
                }
 
-               if (!is_shmem && PageDirty(page)) {
+               if (!is_shmem && (PageDirty(page) ||
+                                 PageWriteback(page))) {
                        /*
                         * khugepaged only works on read-only fd, so this
                         * page is dirty because it hasn't been flushed