mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths
Author:     Jann Horn <jannh@google.com>
AuthorDate: Tue, 6 Dec 2022 17:16:05 +0000 (18:16 +0100)
Commit:     Thomas Lamprecht <t.lamprecht@proxmox.com>
CommitDate: Wed, 14 Dec 2022 13:02:53 +0000 (14:02 +0100)
commit f268f6cf875f3220afc77bdd0bf1bb136eb54db9 upstream.

Any codepath that zaps page table entries must invoke MMU notifiers to
ensure that secondary MMUs (like KVM) don't keep accessing pages which
aren't mapped anymore.  Secondary MMUs don't hold their own references to
pages that are mirrored over, so failing to notify them can lead to page
use-after-free.
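
For reference, the required bracketing looks roughly like this (a
minimal sketch distilled from the hunks below; collapse_pmd_sketch()
is a hypothetical function name used only for illustration, not code
from the tree):

	/*
	 * Sketch only: bracket the PMD zap with MMU notifier calls so
	 * that secondary MMUs (e.g. KVM) drop their mirrored mappings
	 * before the page table is freed.  This mirrors the
	 * collapse_pte_mapped_thp() hunk in the patch below.
	 */
	static void collapse_pmd_sketch(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd)
	{
		struct mmu_notifier_range range;
		pmd_t _pmd;

		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
					mm, haddr, haddr + HPAGE_PMD_SIZE);
		mmu_notifier_invalidate_range_start(&range);

		/* the actual zap: detach the page table and flush TLBs */
		_pmd = pmdp_collapse_flush(vma, haddr, pmd);
		mm_dec_nr_ptes(mm);
		tlb_remove_table_sync_one();

		mmu_notifier_invalidate_range_end(&range);
		pte_free(mm, pmd_pgtable(_pmd));
	}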

I'm marking this as addressing an issue introduced in commit f3f0e1d2150b
("khugepaged: add support of collapse for tmpfs/shmem pages"), but most of
the security impact of this only came in commit 27e1f8273113 ("khugepaged:
enable collapse pmd for pte-mapped THP"), which actually omitted flushes
for the removal of present PTEs, not just for the removal of empty page
tables.

Link: https://lkml.kernel.org/r/20221129154730.2274278-3-jannh@google.com
Link: https://lkml.kernel.org/r/20221128180252.1684965-3-jannh@google.com
Link: https://lkml.kernel.org/r/20221125213714.4115729-3-jannh@google.com
Fixes: f3f0e1d2150b ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[manual backport: this code was refactored from two copies into a common
helper between 5.15 and 6.0]
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
(cherry picked from commit 1a3f8c6cd29d9078cc81b29d39d0e9ae1d6a03c3)
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
 mm/khugepaged.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 1735123e462ad68c4dfd2f26c008451f16250538..fd25d12e85b3362fbeecdb6eb52f2704e79d40ba 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1443,6 +1443,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
        spinlock_t *ptl;
        int count = 0;
        int i;
+       struct mmu_notifier_range range;
 
        if (!vma || !vma->vm_file ||
            !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
@@ -1536,9 +1537,13 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
        }
 
        /* step 4: collapse pmd */
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
+                               haddr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
        _pmd = pmdp_collapse_flush(vma, haddr, pmd);
        mm_dec_nr_ptes(mm);
        tlb_remove_table_sync_one();
+       mmu_notifier_invalidate_range_end(&range);
        pte_free(mm, pmd_pgtable(_pmd));
 
        i_mmap_unlock_write(vma->vm_file->f_mapping);
@@ -1622,11 +1627,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                 */
                if (mmap_write_trylock(mm)) {
                        if (!khugepaged_test_exit(mm)) {
+                               struct mmu_notifier_range range;
+
+                               mmu_notifier_range_init(&range,
+                                                       MMU_NOTIFY_CLEAR, 0,
+                                                       NULL, mm, addr,
+                                                       addr + HPAGE_PMD_SIZE);
+                               mmu_notifier_invalidate_range_start(&range);
                                /* assume page table is clear */
                                _pmd = pmdp_collapse_flush(vma, addr, pmd);
                                mm_dec_nr_ptes(mm);
                                tlb_remove_table_sync_one();
                                pte_free(mm, pmd_pgtable(_pmd));
+                               mmu_notifier_invalidate_range_end(&range);
                        }
                        mmap_write_unlock(mm);
                } else {