diff --git a/mm/memory.c b/mm/memory.c
index adf9b9ef8277da3dab7500ee26b0067bd6763e71..a48f3c43eea82572de1a4221d6ff46f770c589c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1301,6 +1301,17 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
        return ret;
 }
 
+/* Whether we should zap all COWed (private) pages too */
+static inline bool should_zap_cows(struct zap_details *details)
+{
+       /* By default, zap all pages */
+       if (!details)
+               return true;
+
+       /* Or, we zap COWed pages only if the caller wants to */
+       return !details->check_mapping;
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
@@ -1396,16 +1407,18 @@ again:
                        continue;
                }
 
-               /* If details->check_mapping, we leave swap entries. */
-               if (unlikely(details))
-                       continue;
-
-               if (!non_swap_entry(entry))
+               if (!non_swap_entry(entry)) {
+                       /* Genuine swap entry, hence a private anon page */
+                       if (!should_zap_cows(details))
+                               continue;
                        rss[MM_SWAPENTS]--;
-               else if (is_migration_entry(entry)) {
+               } else if (is_migration_entry(entry)) {
                        struct page *page;
 
                        page = pfn_swap_entry_to_page(entry);
+                       if (details && details->check_mapping &&
+                           details->check_mapping != page_rmapping(page))
+                               continue;
                        rss[mm_counter(page)]--;
                }
                if (unlikely(!free_swap_and_cache(entry)))
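
The two hunks above replace the old blanket "skip every swap entry when details is non-NULL" test with a per-entry decision: genuine swap entries (private anon pages that went to swap) are dropped only when should_zap_cows() allows it, and migration entries are filtered against details->check_mapping. As a rough illustration of that gating logic only, here is a stand-alone user-space sketch; struct zap_details and struct address_space are reduced to hypothetical stand-ins and do not match the kernel definitions:

/* Stand-alone sketch, not kernel code: the two structs below are
 * hypothetical stand-ins used purely for illustration. */
#include <stdbool.h>
#include <stdio.h>

struct address_space { int unused; };            /* reduced to a tag */

struct zap_details {                             /* hypothetical stand-in */
        struct address_space *check_mapping;     /* only zap this mapping */
};

/* Mirrors should_zap_cows() above: no details, or no mapping filter,
 * means COWed private pages and their swap entries are zapped too. */
static bool should_zap_cows(const struct zap_details *details)
{
        if (!details)
                return true;
        return !details->check_mapping;
}

int main(void)
{
        struct address_space file_mapping = { 0 };
        struct zap_details restricted = { .check_mapping = &file_mapping };

        printf("details == NULL   : zap cows = %d\n", should_zap_cows(NULL));
        printf("check_mapping set : zap cows = %d\n", should_zap_cows(&restricted));
        return 0;
}

With check_mapping set, private COW copies and their swap entries are left alone; with details == NULL, or no mapping filter, everything in the range is torn down.
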
@@ -1655,6 +1668,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
 }
+EXPORT_SYMBOL(zap_page_range);
 
 /**
  * zap_page_range_single - remove user pages in a given range
@@ -3861,11 +3875,20 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                return ret;
 
        if (unlikely(PageHWPoison(vmf->page))) {
-               if (ret & VM_FAULT_LOCKED)
-                       unlock_page(vmf->page);
-               put_page(vmf->page);
+               struct page *page = vmf->page;
+               vm_fault_t poisonret = VM_FAULT_HWPOISON;
+               if (ret & VM_FAULT_LOCKED) {
+                       if (page_mapped(page))
+                               unmap_mapping_pages(page_mapping(page),
+                                                   page->index, 1, false);
+                       /* Retry if a clean page was removed from the cache. */
+                       if (invalidate_inode_page(page))
+                               poisonret = VM_FAULT_NOPAGE;
+                       unlock_page(page);
+               }
+               put_page(page);
                vmf->page = NULL;
-               return VM_FAULT_HWPOISON;
+               return poisonret;
        }
 
        if (unlikely(!(ret & VM_FAULT_LOCKED)))
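
The rewritten PageHWPoison branch above tries to get the poisoned page out of the page cache before deciding what to report: when the page came back locked it is first unmapped from any page tables, then invalidate_inode_page() attempts to drop it, and only if that succeeds does the function return VM_FAULT_NOPAGE so the fault is retried against freshly read data; otherwise VM_FAULT_HWPOISON is returned and the task gets SIGBUS. A minimal sketch of just that return-value decision, with hypothetical stand-ins for the fault codes (not the kernel's vm_fault_t values):

/* Minimal sketch of the decision above; the enum and helper are
 * hypothetical stand-ins, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

enum sketch_fault_result {
        SKETCH_FAULT_HWPOISON,  /* report the error: task gets SIGBUS */
        SKETCH_FAULT_NOPAGE     /* clean page evicted: retry the fault */
};

/* page_was_locked: did ->fault() return the page locked (VM_FAULT_LOCKED)?
 * clean_page_dropped: did invalidate_inode_page() evict the clean copy,
 * so a retried fault re-reads intact data from backing storage? */
static enum sketch_fault_result
poisoned_fault_result(bool page_was_locked, bool clean_page_dropped)
{
        if (page_was_locked && clean_page_dropped)
                return SKETCH_FAULT_NOPAGE;
        return SKETCH_FAULT_HWPOISON;
}

int main(void)
{
        printf("locked, evicted: %d (retry)\n",
               poisoned_fault_result(true, true));
        printf("locked, kept   : %d (SIGBUS)\n",
               poisoned_fault_result(true, false));
        return 0;
}
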
@@ -3906,6 +3929,15 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
        if (compound_order(page) != HPAGE_PMD_ORDER)
                return ret;
 
+       /*
+        * Just back off if any subpage of a THP is corrupted, otherwise
+        * the corrupted page may be mapped by PMD silently to escape the
+        * check.  This kind of THP can only be PTE mapped.  Access to
+        * the corrupted subpage should trigger SIGBUS as expected.
+        */
+       if (unlikely(PageHasHWPoisoned(page)))
+               return ret;
+
        /*
         * Archs like ppc64 need additional space to store information
         * related to pte entry. Use the preallocated table for that.
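
The added PageHasHWPoisoned() test makes do_set_pmd() refuse the huge mapping whenever any subpage of the THP is corrupted, so the fault falls back to PTE mappings and only the one bad subpage raises SIGBUS when it is actually touched. A stand-alone sketch of that eligibility check, using a hypothetical struct in place of struct page and assuming 4 KB base pages with 2 MB THPs (order 9):

/* Stand-alone sketch of the check above; struct thp_sketch and the
 * constant are hypothetical stand-ins, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_ORDER 9                        /* 2 MB THP on 4 KB pages */

struct thp_sketch {
        unsigned int order;                      /* compound order */
        bool has_hwpoisoned_subpage;             /* any subpage poisoned? */
};

/* Only a fully healthy PMD-order page may be mapped with a single PMD;
 * otherwise fall back to PTE mappings so just the corrupted subpage
 * triggers SIGBUS on access. */
static bool may_map_with_pmd(const struct thp_sketch *page)
{
        if (page->order != HPAGE_PMD_ORDER)
                return false;
        if (page->has_hwpoisoned_subpage)
                return false;
        return true;
}

int main(void)
{
        struct thp_sketch healthy  = { .order = 9, .has_hwpoisoned_subpage = false };
        struct thp_sketch poisoned = { .order = 9, .has_hwpoisoned_subpage = true };

        printf("healthy THP : PMD map = %d\n", may_map_with_pmd(&healthy));
        printf("poisoned THP: PMD map = %d\n", may_map_with_pmd(&poisoned));
        return 0;
}
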
@@ -5436,6 +5468,8 @@ long copy_huge_page_from_user(struct page *dst_page,
                if (rc)
                        break;
 
+               flush_dcache_page(subpage);
+
                cond_resched();
        }
        return ret_val;
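
The flush_dcache_page() call added above makes the bytes just written through the kernel mapping of each subpage visible to user-space mappings; it is a no-op on x86 but required on architectures with aliasing data caches. The shape of that per-subpage loop, reduced to a user-space sketch in which flush_dcache_stub() is a hypothetical stand-in for the arch hook and 4 KB base pages are assumed:

/* User-space sketch of the loop's shape; flush_dcache_stub() is a
 * hypothetical stand-in for flush_dcache_page(). */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SUBPAGE_SIZE 4096u                       /* assumed base page size */

static void flush_dcache_stub(void *subpage)
{
        (void)subpage;                           /* arch hook; no-op here */
}

/* Copy a huge page's worth of data one subpage at a time, flushing each
 * subpage right after it is written so other mappings see the new bytes. */
static void copy_huge_sketch(char *dst, const char *src, size_t nr_subpages)
{
        for (size_t i = 0; i < nr_subpages; i++) {
                char *subpage = dst + i * SUBPAGE_SIZE;

                memcpy(subpage, src + i * SUBPAGE_SIZE, SUBPAGE_SIZE);
                flush_dcache_stub(subpage);      /* mirrors the added call */
        }
}

int main(void)
{
        static char src[2 * SUBPAGE_SIZE], dst[2 * SUBPAGE_SIZE];

        memset(src, 0xab, sizeof(src));
        copy_huge_sketch(dst, src, 2);
        printf("copied %zu bytes\n", sizeof(dst));
        return 0;
}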