device-dax: set mapping prior to vmf_insert_pfn{,_pmd,pud}()
author Joao Martins <joao.m.martins@oracle.com>
Fri, 14 Jan 2022 22:04:40 +0000 (14:04 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 15 Jan 2022 14:30:26 +0000 (16:30 +0200)
Normally, the @page mapping is set prior to inserting the page into a
page table entry.  Make device-dax adhere to the same ordering, rather
than setting the mapping after the PTE is inserted.

The address_space never changes and it is always associated with the
same inode and underlying pages.  So, the page mapping is set once but
cleared when the struct pages are removed/freed (i.e. after
{devm_}memunmap_pages()).
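
The dax_set_mapping() helper this patch reorders was factored out
earlier in this series.  As a rough sketch (reconstructed; the in-tree
body may differ), it walks the pfn range covered by the fault and
points each not-yet-initialized page at the file's address_space:

    static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
                                unsigned long fault_size)
    {
            unsigned long i, nr_pages = fault_size / PAGE_SIZE;
            struct file *filp = vmf->vma->vm_file;
            pgoff_t pgoff;

            pgoff = linear_page_index(vmf->vma,
                            ALIGN(vmf->address, fault_size));

            for (i = 0; i < nr_pages; i++) {
                    struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);

                    /* mapping/index are set once per page lifetime */
                    if (page->mapping)
                            continue;

                    page->mapping = filp->f_mapping;
                    page->index = pgoff + i;
            }
    }

Because the mapping is write-once for the lifetime of the pages,
calling the helper before the page table insert is safe, and it closes
the window where a page reachable through the page tables still has a
NULL ->mapping.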

Link: https://lkml.kernel.org/r/20211202204422.26777-10-joao.m.martins@oracle.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 9c87927d4bc2834b6568d634a02dcf957e5655f7..19a6b86486ce4012b088a58dcba6245e161e6613 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -121,6 +121,8 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 
        *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+       dax_set_mapping(vmf, *pfn, fault_size);
+
        return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }
 
@@ -161,6 +163,8 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 
        *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+       dax_set_mapping(vmf, *pfn, fault_size);
+
        return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -203,6 +207,8 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 
        *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+       dax_set_mapping(vmf, *pfn, fault_size);
+
        return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
@@ -217,7 +223,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
 {
        struct file *filp = vmf->vma->vm_file;
-       unsigned long fault_size;
        vm_fault_t rc = VM_FAULT_SIGBUS;
        int id;
        pfn_t pfn;
@@ -230,23 +235,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
        id = dax_read_lock();
        switch (pe_size) {
        case PE_SIZE_PTE:
-               fault_size = PAGE_SIZE;
                rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
                break;
        case PE_SIZE_PMD:
-               fault_size = PMD_SIZE;
                rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
                break;
        case PE_SIZE_PUD:
-               fault_size = PUD_SIZE;
                rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
                break;
        default:
                rc = VM_FAULT_SIGBUS;
        }
 
-       if (rc == VM_FAULT_NOPAGE)
-               dax_set_mapping(vmf, pfn, fault_size);
        dax_read_unlock(id);
 
        return rc;
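
After this change the PTE fault path reads roughly as below
(reconstructed from the hunks above; the VMA and alignment checks are
abbreviated).  The PMD and PUD paths follow the same pattern with
PMD_SIZE/PUD_SIZE and vmf_insert_pfn_pmd()/vmf_insert_pfn_pud():

    static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
                                    struct vm_fault *vmf, pfn_t *pfn)
    {
            unsigned int fault_size = PAGE_SIZE;
            phys_addr_t phys;

            if (check_vma(dev_dax, vmf->vma, __func__))
                    return VM_FAULT_SIGBUS;

            /* ... dev_dax->align sanity checks elided ... */

            phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
            if (phys == -1)
                    return VM_FAULT_SIGBUS;

            *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

            /* mapping is established before the PTE becomes visible */
            dax_set_mapping(vmf, *pfn, fault_size);

            return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
    }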