]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
nvmet: add buffered I/O support for file backed ns
authorChaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Wed, 20 Jun 2018 04:01:41 +0000 (00:01 -0400)
committerChristoph Hellwig <hch@lst.de>
Mon, 23 Jul 2018 07:35:14 +0000 (09:35 +0200)
Add a new "buffered_io" attribute, which disables direct I/O and thus
enables page cache based caching when set. The attribute can only
be changed when the namespace is disabled, as the file has to be reopened
for the change to take effect.

The possibly blocking read/write operations are deferred to a newly
introduced global workqueue.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/nvmet.h

index d3f3b3ec4d1afaf3d7ed3626f3211ccba54c87e4..fee56b3a23bc7d57cb186ab54fb9b920c6f2e4bd 100644 (file)
@@ -407,11 +407,40 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_ns_, enable);
 
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+       return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct nvmet_ns *ns = to_nvmet_ns(item);
+       bool val;
+
+       if (strtobool(page, &val))
+               return -EINVAL;
+
+       mutex_lock(&ns->subsys->lock);
+       if (ns->enabled) {
+               pr_err("disable ns before setting buffered_io value.\n");
+               mutex_unlock(&ns->subsys->lock);
+               return -EINVAL;
+       }
+
+       ns->buffered_io = val;
+       mutex_unlock(&ns->subsys->lock);
+       return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
 static struct configfs_attribute *nvmet_ns_attrs[] = {
        &nvmet_ns_attr_device_path,
        &nvmet_ns_attr_device_nguid,
        &nvmet_ns_attr_device_uuid,
        &nvmet_ns_attr_enable,
+       &nvmet_ns_attr_buffered_io,
        NULL,
 };
 
index 74d4b785d2daac7d203108f06286221b5337f993..96eafbd419e7364c4d00a509de360ba418613a1d 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "nvmet.h"
 
+struct workqueue_struct *buffered_io_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
@@ -437,6 +438,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
        ns->nsid = nsid;
        ns->subsys = subsys;
        uuid_gen(&ns->uuid);
+       ns->buffered_io = false;
 
        return ns;
 }
@@ -1109,6 +1111,12 @@ static int __init nvmet_init(void)
 {
        int error;
 
+       buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+                       WQ_MEM_RECLAIM, 0);
+       if (!buffered_io_wq) {
+               error = -ENOMEM;
+               goto out;
+       }
        error = nvmet_init_discovery();
        if (error)
                goto out;
@@ -1129,6 +1137,7 @@ static void __exit nvmet_exit(void)
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
+       destroy_workqueue(buffered_io_wq);
 
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
index 8c42b3a8c420ab29a0b79f29df5c9e55c1ce73d9..57c660e3245d76ba00b449003bd58a7b6e6a6220 100644 (file)
@@ -16,6 +16,8 @@
 void nvmet_file_ns_disable(struct nvmet_ns *ns)
 {
        if (ns->file) {
+               if (ns->buffered_io)
+                       flush_workqueue(buffered_io_wq);
                mempool_destroy(ns->bvec_pool);
                ns->bvec_pool = NULL;
                kmem_cache_destroy(ns->bvec_cache);
@@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
 
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
 {
-       int ret;
+       int flags = O_RDWR | O_LARGEFILE;
        struct kstat stat;
+       int ret;
+
+       if (!ns->buffered_io)
+               flags |= O_DIRECT;
 
-       ns->file = filp_open(ns->device_path,
-                       O_RDWR | O_LARGEFILE | O_DIRECT, 0);
+       ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
                pr_err("failed to open file %s: (%ld)\n",
                                ns->device_path, PTR_ERR(ns->file));
@@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
 
        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
-       iocb->ki_flags = IOCB_DIRECT | ki_flags;
+       iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
 
        ret = call_iter(iocb, &iter);
 
@@ -189,6 +194,19 @@ out:
        nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
 }
 
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+       struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+       nvmet_file_execute_rw(req);
+}
+
+static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+{
+       INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+       queue_work(buffered_io_wq, &req->f.work);
+}
+
 static void nvmet_file_flush_work(struct work_struct *w)
 {
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
@@ -280,7 +298,10 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
-               req->execute = nvmet_file_execute_rw;
+               if (req->ns->buffered_io)
+                       req->execute = nvmet_file_execute_rw_buffered_io;
+               else
+                       req->execute = nvmet_file_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
index 480dfe10fad943e269449111f2372e8a876a0f95..5efb98ec95df8da77dd41599af624c9a51124500 100644 (file)
@@ -65,6 +65,7 @@ struct nvmet_ns {
        u8                      nguid[16];
        uuid_t                  uuid;
 
+       bool                    buffered_io;
        bool                    enabled;
        struct nvmet_subsys     *subsys;
        const char              *device_path;
@@ -269,6 +270,8 @@ struct nvmet_req {
        const struct nvmet_fabrics_ops *ops;
 };
 
+extern struct workqueue_struct *buffered_io_wq;
+
 static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
 {
        req->rsp->status = cpu_to_le16(status << 1);