Merge tag 'for-linus-20180309' of git://git.kernel.dk/linux-block
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 10 Mar 2018 16:48:01 +0000 (08:48 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 10 Mar 2018 16:48:01 +0000 (08:48 -0800)
Pull block fixes from Jens Axboe:

 - a xen-blkfront fix from Bhavesh, addressing a multiqueue issue when
   detaching and re-attaching a device

 - a few important NVMe fixes, including a revert for a sysfs fix that
   caused some user space confusion

 - two bcache fixes by way of Michael Lyle

 - a loop regression fix for lost writes on DAX

* tag 'for-linus-20180309' of git://git.kernel.dk/linux-block:
  loop: Fix lost writes caused by missing flag
  nvme_fc: rework sqsize handling
  nvme-fabrics: Ignore nr_io_queues option for discovery controllers
  xen-blkfront: move negotiate_mq to cover all cases of new VBDs
  Revert "nvme: create 'slaves' and 'holders' entries for hidden controllers"
  bcache: don't attach backing with duplicate UUID
  bcache: fix crashes in duplicate cache device register
  nvme: pci: pass max vectors as num_possible_cpus() to pci_alloc_irq_vectors
  nvme-pci: Fix EEH failure on ppc

drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/md/bcache/super.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 87855b5123a6307cb014f78af2ae07b43ef77cf4..ee62d2d517bf4537c60cdd83c70363382403fc57 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
        struct iov_iter i;
        ssize_t bw;
 
-       iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+       iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
 
        file_start_write(file);
        bw = vfs_iter_write(file, &i, ppos, 0);
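
The one-liner above is the whole loop fix: vfs_iter_write() and, further down,
the DAX I/O path key off the iterator's direction (iov_iter_rw()), so a bvec
iterator built without WRITE is treated as a read and the data never reaches
the backing file. A minimal sketch of the intended setup, not part of the
patch and assuming the 4.15-era iov_iter API (page and len are placeholders):

	struct iov_iter i;
	struct bio_vec bv = { .bv_page = page, .bv_offset = 0, .bv_len = len };

	/* in this API generation the direction travels with the iterator */
	iov_iter_bvec(&i, ITER_BVEC | WRITE, &bv, 1, bv.bv_len);
	WARN_ON(iov_iter_rw(&i) != WRITE);	/* what consumers such as DAX check */
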
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e126e4cac2ca499566da91a6e3da01d0b1e4381e..92ec1bbece51d31c44f88eb6a2037333dd7a9f40 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
 
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
        unsigned int i, max_page_order;
        unsigned int ring_page_order;
 
+       if (!info)
+               return -ENODEV;
+
        max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
                                              "max-ring-page-order", 0);
        ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
        info->nr_ring_pages = 1 << ring_page_order;
 
+       err = negotiate_mq(info);
+       if (err)
+               goto destroy_blkring;
+
        for (i = 0; i < info->nr_rings; i++) {
                struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
        }
 
        info->xbdev = dev;
-       err = negotiate_mq(info);
-       if (err) {
-               kfree(info);
-               return err;
-       }
 
        mutex_init(&info->mutex);
        info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
 
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
-       err = negotiate_mq(info);
-       if (err)
-               return err;
-
        err = talk_to_blkback(dev, info);
        if (!err)
                blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4d1d8dfb2d2a4d1de7d6ee59e2b8d802a68827e5..f2273143b3cb2384109ba47bf3518adf31337ad8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        uint32_t rtime = cpu_to_le32(get_seconds());
        struct uuid_entry *u;
        char buf[BDEVNAME_SIZE];
+       struct cached_dev *exist_dc, *t;
 
        bdevname(dc->bdev, buf);
 
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
                return -EINVAL;
        }
 
+       /* Check whether already attached */
+       list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+               if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+                       pr_err("Tried to attach %s but duplicate UUID already attached",
+                               buf);
+
+                       return -EINVAL;
+               }
+       }
+
        u = uuid_find(c, dc->sb.uuid);
 
        if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
        return;
 err:
-       pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+       pr_notice("error %s: %s", bdevname(bdev, name), err);
        bcache_device_stop(&dc->disk);
 }
 
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
        const char *err = NULL; /* must be set for any error case */
        int ret = 0;
 
+       bdevname(bdev, name);
+
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
        bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
        get_page(sb_page);
 
-       if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+       if (blk_queue_discard(bdev_get_queue(bdev)))
                ca->discard = CACHE_DISCARD(&ca->sb);
 
        ret = cache_alloc(ca);
        if (ret != 0) {
+               blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
                if (ret == -ENOMEM)
                        err = "cache_alloc(): -ENOMEM";
                else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                goto out;
        }
 
-       pr_info("registered cache device %s", bdevname(bdev, name));
+       pr_info("registered cache device %s", name);
 
 out:
        kobject_put(&ca->kobj);
 
 err:
        if (err)
-               pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+               pr_notice("error %s: %s", name, err);
 
        return ret;
 }
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
        if (err)
                goto err_close;
 
+       err = "failed to register device";
        if (SB_IS_BDEV(sb)) {
                struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
                if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                        goto err_close;
 
                if (register_cache(sb, sb_page, bdev, ca) != 0)
-                       goto err_close;
+                       goto err;
        }
 out:
        if (sb_page)
@@ -2041,7 +2056,7 @@ out:
 err_close:
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 err:
-       pr_info("error opening %s: %s", path, err);
+       pr_info("error %s: %s", path, err);
        ret = -EINVAL;
        goto out;
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 817e5e2766da36b7312e6992952b073ae2e0111a..7aeca5db791613f345f733513f2558d4de14e2ae 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
                        ns->disk->disk_name);
 
        nvme_mpath_add_disk(ns->head);
-       nvme_mpath_add_disk_links(ns);
        return;
  out_unlink_ns:
        mutex_lock(&ctrl->subsys->lock);
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                return;
 
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-               nvme_mpath_remove_disk_links(ns);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
                if (ns->ndev)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a1c58e35075e9e180cae240d6951c807bb7a4a82..8f0f34d06d46965168e4472ea2f9f5b5daca6a20 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -EINVAL;
                                goto out;
                        }
+                       if (opts->discovery_nqn) {
+                               pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+                               break;
+                       }
+
                        opts->nr_io_queues = min_t(unsigned int,
                                        num_online_cpus(), token);
                        break;
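
Context for the hunk above, not part of the patch: a discovery controller
exposes only the admin queue and the discovery log, so there are no I/O queues
for nr_io_queues to size; the option is still parsed for syntax but then
discarded. Illustrative connect strings as the fabrics layer would see them
(the transport and address are placeholders):

	static const char io_ctrl_opts[] =
		"transport=rdma,traddr=192.168.1.10,nqn=testnqn,nr_io_queues=8";
	/* the same option against the well-known discovery NQN is now ignored */
	static const char discovery_opts[] =
		"transport=rdma,traddr=192.168.1.10,"
		"nqn=nqn.2014-08.org.nvmexpress.discovery,nr_io_queues=8";
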
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7f51f8414b97238e647ef37f13942755e4b83a16..1dc1387b71342e67bb0f6848104e2ee4ab901661 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
                                sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
        assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-       assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+       assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
        uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
        conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
-       conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+       conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
        lsop->queue = queue;
        lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
                goto out_free_tag_set;
        }
 
-       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_cleanup_blk_queue;
 
-       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_delete_hw_queues;
 
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_free_io_queues;
 
-       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_free_io_queues;
 
-       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_delete_hw_queues;
 
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        nvme_fc_init_queue(ctrl, 0);
 
        ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-                               NVME_AQ_BLK_MQ_DEPTH);
+                               NVME_AQ_DEPTH);
        if (ret)
                goto out_free_queue;
 
        ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-                               NVME_AQ_BLK_MQ_DEPTH,
-                               (NVME_AQ_BLK_MQ_DEPTH / 4));
+                               NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
        if (ret)
                goto out_delete_hw_queue;
 
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
        if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
                opts->queue_size = ctrl->ctrl.maxcmd;
        }
 
+       if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+               /* warn if sqsize is lower than queue_size */
+               dev_warn(ctrl->ctrl.device,
+                       "queue_size %zu > ctrl sqsize %u, clamping down\n",
+                       opts->queue_size, ctrl->ctrl.sqsize + 1);
+               opts->queue_size = ctrl->ctrl.sqsize + 1;
+       }
+
        ret = nvme_fc_init_aen_ops(ctrl);
        if (ret)
                goto out_term_aen_ops;
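
The sqsize rework above follows the NVMe convention that sqsize and CAP.MQES
are zero's-based values: a queue able to hold N commands is encoded as N - 1.
Purely illustrative arithmetic, not taken from the patch:

	u32 queue_size = 128;             /* opts->queue_size, one's-based           */
	u16 sqsize     = queue_size - 1;  /* zero's-based value the Connect carries  */
	u32 hw_depth   = sqsize + 1;      /* depth the driver sizes its HW queues to */
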
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index b7e5c6db4d92fe61313fdb9120cef307204bfd70..060f69e0342761c4767c90afc09a73a10546e1dd 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -210,25 +210,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
        mutex_unlock(&head->subsys->lock);
 }
 
-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-       struct kobject *slave_disk_kobj, *holder_disk_kobj;
-
-       if (!ns->head->disk)
-               return;
-
-       slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
-       if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
-                       kobject_name(slave_disk_kobj)))
-               return;
-
-       holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
-       if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
-                       kobject_name(holder_disk_kobj)))
-               sysfs_remove_link(ns->head->disk->slave_dir,
-                       kobject_name(slave_disk_kobj));
-}
-
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
@@ -243,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
        blk_cleanup_queue(head->disk->queue);
        put_disk(head->disk);
 }
-
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-       if (!ns->head->disk)
-               return;
-
-       sysfs_remove_link(ns->disk->part0.holder_dir,
-                       kobject_name(&disk_to_dev(ns->head->disk)->kobj));
-       sysfs_remove_link(ns->head->disk->slave_dir,
-                       kobject_name(&disk_to_dev(ns->disk)->kobj));
-}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0521e4707d1cfe193a2193d48e1332e174087e3a..d733b14ede9dc10022e0ae14da8cb4550c46831b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns_head *head);
-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
 
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-}
-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-}
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5933a5c732e8349731766eeb7b7c0b0b9b6bea6c..b6f43b738f03ae3b6008cd188d467623c1e70ecf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
        if (!(csts & NVME_CSTS_CFS) && !nssro)
                return false;
 
-       /* If PCI error recovery process is happening, we cannot reset or
-        * the recovery mechanism will surely fail.
-        */
-       if (pci_channel_offline(to_pci_dev(dev->dev)))
-               return false;
-
        return true;
 }
 
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_command cmd;
        u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
+       /* If PCI error recovery process is happening, we cannot reset or
+        * the recovery mechanism will surely fail.
+        */
+       mb();
+       if (pci_channel_offline(to_pci_dev(dev->dev)))
+               return BLK_EH_RESET_TIMER;
+
        /*
         * Reset immediately if the controller is failed
         */
@@ -1913,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        int result, nr_io_queues;
        unsigned long size;
 
-       nr_io_queues = num_present_cpus();
+       nr_io_queues = num_possible_cpus();
        result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
        if (result < 0)
                return result;
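
On the last hunk: nr_io_queues is later used as the ceiling for MSI-X vector
allocation, and with affinity spreading those vectors cover all possible CPUs,
so a CPU that is hot-added later still maps to a queue with a vector. A rough
sketch of where the value ends up (paraphrased, not the exact call sequence in
this driver):

	nr_io_queues = num_possible_cpus();
	/* handed to the PCI core as the upper bound on vectors */
	nr_vecs = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
					PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
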