libnvdimm, pmem: disable dax flushing when pmem is fronting a volatile region
author     Dan Williams <dan.j.williams@intel.com>
           Fri, 9 Jun 2017 16:46:50 +0000 (09:46 -0700)
committer  Dan Williams <dan.j.williams@intel.com>
           Thu, 29 Jun 2017 16:29:50 +0000 (09:29 -0700)
The pmem driver attaches to both persistent and volatile memory ranges
advertised by the ACPI NFIT. When the region is volatile it is redundant
to spend cycles flushing caches at fsync(). Check if the hosting region
is volatile and do not set dax_write_cache() if it is.
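
In code terms, the attach path now derives both decisions from the hosting
region (condensed from the diff below; error paths and unrelated setup are
omitted, so treat this as a sketch rather than the full function):

	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region);	/* 0 when the region is volatile */

	blk_queue_write_cache(q, wbc, fua);	/* only claim a write cache if one exists */
	dax_write_cache(dax_dev, wbc);		/* skip dax cache flushing for volatile media */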

Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/nvdimm/pmem.c
drivers/nvdimm/region_devs.c
include/linux/libnvdimm.h

diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7339d184070e242ad8e919cb6ea0edfacba2961f..e7a40f77f7299379b1b96388bbf74d21b4a5e316 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -284,10 +284,10 @@ static int pmem_attach_disk(struct device *dev,
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct vmem_altmap __altmap, *altmap = NULL;
+       int nid = dev_to_node(dev), fua, wbc;
        struct resource *res = &nsio->res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
-       int nid = dev_to_node(dev);
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct resource pfn_res;
@@ -314,9 +314,12 @@ static int pmem_attach_disk(struct device *dev,
        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
-       if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE)
-                       || nvdimm_has_flush(nd_region) < 0)
+       fua = nvdimm_has_flush(nd_region);
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
                dev_warn(dev, "unable to guarantee persistence of writes\n");
+               fua = 0;
+       }
+       wbc = nvdimm_has_cache(nd_region);
 
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
@@ -360,7 +363,7 @@ static int pmem_attach_disk(struct device *dev,
                return PTR_ERR(addr);
        pmem->virt_addr = addr;
 
-       blk_queue_write_cache(q, true, true);
+       blk_queue_write_cache(q, wbc, fua);
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_max_hw_sectors(q, UINT_MAX);
@@ -390,7 +393,7 @@ static int pmem_attach_disk(struct device *dev,
                put_disk(disk);
                return -ENOMEM;
        }
-       dax_write_cache(dax_dev, true);
+       dax_write_cache(dax_dev, wbc);
        pmem->dax_dev = dax_dev;
 
        gendev = disk_to_dev(disk);
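
For context, what the two booleans mean to the rest of the stack in this
kernel generation (shown as a note, not part of the change):

	/*
	 * blk_queue_write_cache(q, wbc, fua):
	 *   wbc sets/clears QUEUE_FLAG_WC - the queue fronts a volatile write
	 *       cache, so REQ_PREFLUSH has real work to do;
	 *   fua sets/clears QUEUE_FLAG_FUA - REQ_FUA writes are supported.
	 * When QUEUE_FLAG_WC is clear the block layer strips REQ_PREFLUSH and
	 * REQ_FUA before bios reach pmem_make_request(), and dax_write_cache()
	 * likewise gates whether the dax fsync/msync path flushes CPU caches.
	 */
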
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 53a64a16aba448bc6ad29def9c8224d36eeb7aef..0c3b089b280a9d8cf114381fdd80e0b0249599b7 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1060,6 +1060,12 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_flush);
 
+int nvdimm_has_cache(struct nd_region *nd_region)
+{
+       return is_nd_pmem(&nd_region->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+
 void __exit nd_region_devs_exit(void)
 {
        ida_destroy(&region_ida);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index b2f659bd661de8e57661a3d71098b320280a7866..a8ee1d0afd701088df79acc241af5aedb1de8673 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -165,4 +165,5 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
 u64 nd_fletcher64(void *addr, size_t len, bool le);
 void nvdimm_flush(struct nd_region *nd_region);
 int nvdimm_has_flush(struct nd_region *nd_region);
+int nvdimm_has_cache(struct nd_region *nd_region);
 #endif /* __LIBNVDIMM_H__ */