git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
dax: don't use set_huge_zero_page()
author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tue, 8 Sep 2015 21:59:34 +0000 (14:59 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 8 Sep 2015 22:35:28 +0000 (15:35 -0700)
This is another place where DAX assumed that pgtable_t was a pointer.
Open code the important parts of set_huge_zero_page() in DAX and make
set_huge_zero_page() static again.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/dax.c
include/linux/huge_mm.h
mm/huge_memory.c

index 9593f4bee32774e06fa5c8be7d1b24634cb8d0c0..d778e5f1a01cf3ff76e4f222f43bed90bf5d2774 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -572,18 +572,24 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
 
        if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
-               bool set;
                spinlock_t *ptl;
-               struct mm_struct *mm = vma->vm_mm;
+               pmd_t entry;
                struct page *zero_page = get_huge_zero_page();
+
                if (unlikely(!zero_page))
                        goto fallback;
 
-               ptl = pmd_lock(mm, pmd);
-               set = set_huge_zero_page(NULL, mm, vma, pmd_addr, pmd,
-                                                               zero_page);
-               spin_unlock(ptl);
+               ptl = pmd_lock(vma->vm_mm, pmd);
+               if (!pmd_none(*pmd)) {
+                       spin_unlock(ptl);
+                       goto fallback;
+               }
+
+               entry = mk_pmd(zero_page, vma->vm_page_prot);
+               entry = pmd_mkhuge(entry);
+               set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
                result = VM_FAULT_NOPAGE;
+               spin_unlock(ptl);
        } else {
                sector = bh.b_blocknr << (blkbits - 9);
                length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
index f9b612fec4dd8242ca20fe2d0e45c8b520ad273f..ecb080d6ff42077513f03b95537dc108bded9e07 100644 (file)
@@ -163,9 +163,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
 }
 
 struct page *get_huge_zero_page(void);
-bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
-               struct vm_area_struct *vma, unsigned long haddr,
-               pmd_t *pmd, struct page *zero_page);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
index 96dfd9d81fcb2231209167f402b4c9f9f2308fbc..3e574efad8f853eb0545490d9a4b0ce4ef909df4 100644 (file)
@@ -790,7 +790,7 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 }
 
 /* Caller must hold page table lock. */
-bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
 {