libnvdimm, pmem: Unconditionally deep flush on *sync
author Ross Zwisler <ross.zwisler@linux.intel.com>
Wed, 6 Jun 2018 16:45:13 +0000 (10:45 -0600)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Mon, 14 Jan 2019 09:28:55 +0000 (09:28 +0000)
BugLink: http://bugs.launchpad.net/bugs/1807469
commit ce7f11a230d5b7165480b96c0cc7a90358b5b5e2 upstream.

Prior to this commit we would only do a "deep flush" (have nvdimm_flush()
write to each of the flush hints for a region) in response to an
msync/fsync/sync call if nvdimm_has_cache() returned true at the time
we were setting up the request queue.  This happens due to the write cache
value passed in to blk_queue_write_cache(), which then causes the block
layer to send down BIOs with REQ_FUA and REQ_PREFLUSH set.  We do have a
"write_cache" sysfs entry for namespaces, i.e.:

  /sys/bus/nd/devices/pfn0.1/block/pmem0/dax/write_cache

which can be used to control whether or not the kernel thinks a given
namespace has a write cache, but this didn't modify the deep flush behavior
that we set up when the driver was initialized.  Instead, it only modified
whether or not DAX would flush CPU caches via dax_flush() in response to
*sync calls.
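
To make the old coupling concrete, here is a simplified sketch (not the
verbatim driver source; error handling and accounting omitted) of how
pmem_make_request() in this era turned those flags into deep flushes.  The
block layer only sets REQ_PREFLUSH/REQ_FUA on BIOs if
blk_queue_write_cache() advertised a write cache at queue setup:

  /* Simplified from drivers/nvdimm/pmem.c of this era. */
  static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
  {
          struct pmem_device *pmem = q->queuedata;
          struct nd_region *nd_region = to_region(pmem);

          /* Deep flush: nvdimm_flush() writes to each flush hint. */
          if (bio->bi_opf & REQ_PREFLUSH)
                  nvdimm_flush(nd_region);

          /* ... copy each bio_vec to/from pmem ... */

          if (bio->bi_opf & REQ_FUA)
                  nvdimm_flush(nd_region);

          bio_endio(bio);
          return BLK_QC_T_NONE;
  }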

Simplify this by making the *sync deep flush always happen, regardless of
the write cache setting of a namespace.  The DAX CPU cache flushing will
still be controlled by the write_cache setting of the namespace.
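
The DAX path stays gated on that flag: dax_flush() is a nop unless the
write-cache flag set via dax_write_cache() is enabled.  Roughly (a
simplified sketch of drivers/dax/super.c of this era):

  /* Simplified; the real function sits behind CONFIG_ARCH_HAS_PMEM_API. */
  void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
  {
          if (unlikely(!dax_write_cache_enabled(dax_dev)))
                  return;

          arch_wb_cache_pmem(addr, size); /* e.g. clwb-based writeback on x86 */
  }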

Cc: <stable@vger.kernel.org>
Fixes: 5fdf8e5ba566 ("libnvdimm: re-enable deep flush for pmem devices via fsync()")
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ad6bea6c3e26b4dc872c3b7f8637c7eced694eb5..8309187005a6542068e24e0b3d2498b0607fe0c9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -300,7 +300,7 @@ static int pmem_attach_disk(struct device *dev,
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct vmem_altmap __altmap, *altmap = NULL;
-       int nid = dev_to_node(dev), fua, wbc;
+       int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
@@ -335,7 +335,6 @@ static int pmem_attach_disk(struct device *dev,
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }
-       wbc = nvdimm_has_cache(nd_region);
 
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
@@ -379,7 +378,7 @@ static int pmem_attach_disk(struct device *dev,
                return PTR_ERR(addr);
        pmem->virt_addr = addr;
 
-       blk_queue_write_cache(q, wbc, fua);
+       blk_queue_write_cache(q, true, fua);
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
@@ -410,7 +409,7 @@ static int pmem_attach_disk(struct device *dev,
                put_disk(disk);
                return -ENOMEM;
        }
-       dax_write_cache(dax_dev, wbc);
+       dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;
 
        gendev = disk_to_dev(disk);