git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
mm: add vma_alloc_zeroed_movable_folio()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 16 Jan 2023 19:18:09 +0000 (19:18 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:18 +0000 (22:33 -0800)
Replace alloc_zeroed_user_highpage_movable().  The main difference is
returning a folio containing a single page instead of returning the page,
but take the opportunity to rename the function to match other allocation
functions a little better and rewrite the documentation to place more
emphasis on the zeroing rather than the highmem aspect.

Link: https://lkml.kernel.org/r/20230116191813.2145215-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/alpha/include/asm/page.h
arch/arm64/include/asm/page.h
arch/arm64/mm/fault.c
arch/ia64/include/asm/page.h
arch/m68k/include/asm/page_no.h
arch/s390/include/asm/page.h
arch/x86/include/asm/page.h
include/linux/highmem.h
mm/memory.c

index 8f3f5eecba28bcc4aecb6ffb9d99a48c62f6cd6d..bc5256fba8f041f943247786b308ec69b0577009 100644 (file)
@@ -17,9 +17,8 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
index 993a27ea6f543c3087f2f28c7990db54dca3ca4f..2312e6ee595fda5b818e4ea9e2f057b44ffd735c 100644 (file)
@@ -29,9 +29,9 @@ void copy_user_highpage(struct page *to, struct page *from,
 void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                                unsigned long vaddr);
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
 
 void tag_clear_highpage(struct page *to);
 #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
index 596f46dabe4ef2f16f5d10b05e5f9983878961bb..f4cb0f85ccf495e8c1a379fe49b7b635a74ff4b1 100644 (file)
@@ -925,7 +925,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
 /*
  * Used during anonymous page fault handling.
  */
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                                unsigned long vaddr)
 {
        gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
@@ -938,7 +938,7 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_MTE)
                flags |= __GFP_ZEROTAGS;
 
-       return alloc_page_vma(flags, vma, vaddr);
+       return vma_alloc_folio(flags, 0, vma, vaddr, false);
 }
 
 void tag_clear_highpage(struct page *page)
index 1b990466d5404e6030324412a4f2b5f3ebe42986..ba0b365cf2b2b00277c6de3a9a9dc39481433501 100644 (file)
@@ -82,17 +82,15 @@ do {                                                \
 } while (0)
 
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr)                 \
+#define vma_alloc_zeroed_movable_folio(vma, vaddr)                     \
 ({                                                                     \
-       struct page *page = alloc_page_vma(                             \
-               GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr);         \
-       if (page)                                                       \
-               flush_dcache_page(page);                                \
-       page;                                                           \
+       struct folio *folio = vma_alloc_folio(                          \
+               GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
+       if (folio)                                                      \
+               flush_dcache_folio(folio);                              \
+       folio;                                                          \
 })
 
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
-
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #include <asm-generic/memory_model.h>
index c9d0d84158a4ef74a45ba8bff13283c9d866f3b0..abd2c3aeb015137b612a7b63d017911713d4ed96 100644 (file)
@@ -13,9 +13,8 @@ extern unsigned long memory_end;
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 #define __pa(vaddr)            ((unsigned long)(vaddr))
 #define __va(paddr)            ((void *)((unsigned long)(paddr)))
index 61dea67bb9c74a68f7a589a8503708cd55b6e175..8a2a3b5d1e293e89082a21b070ef8e51746a72e7 100644 (file)
@@ -73,9 +73,8 @@ static inline void copy_page(void *to, void *from)
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 /*
  * These are used to make use of C type-checking..
index 9cc82f305f4bf0862f7b8de0f4c1799e3ee41104..d18e5c332cb9f443b2279d0545779b67dcb7ec19 100644 (file)
@@ -34,9 +34,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
        copy_page(to, from);
 }
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 #ifndef __pa
 #define __pa(x)                __phys_addr((unsigned long)(x))
index d7097b8158f202ca834945eb10cf537f20bb5a7f..e22509420ac6d923ee1bfb62898a724df91101c2 100644 (file)
@@ -207,31 +207,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#ifndef vma_alloc_zeroed_movable_folio
 /**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
  *
- * Returns: The allocated and zeroed HIGHMEM page
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address.  It may be allocated from highmem or
+ * the movable zone.  An architecture may provide its own implementation.
  *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
- * implementation.
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
  */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                   unsigned long vaddr)
 {
-       struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+       struct folio *folio;
 
-       if (page)
-               clear_user_highpage(page, vaddr);
+       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+       if (folio)
+               clear_user_highpage(&folio->page, vaddr);
 
-       return page;
+       return folio;
 }
 #endif
 
index 87b33b4967c23e8aa027285b461b343e1d997389..b6358ffbccaa8434f01d06c77b9cd2ee283e6d05 100644 (file)
@@ -3056,10 +3056,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                goto oom;
 
        if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-               new_page = alloc_zeroed_user_highpage_movable(vma,
-                                                             vmf->address);
-               if (!new_page)
+               struct folio *new_folio;
+
+               new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+               if (!new_folio)
                        goto oom;
+               new_page = &new_folio->page;
        } else {
                new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                                vmf->address);
@@ -3995,6 +3997,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
+       struct folio *folio;
        vm_fault_t ret = 0;
        pte_t entry;
 
@@ -4044,11 +4047,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        /* Allocate our own private page. */
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
-       if (!page)
+       folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+       if (!folio)
                goto oom;
 
-       if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+       page = &folio->page;
+       if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                goto oom_free_page;
        cgroup_throttle_swaprate(page, GFP_KERNEL);