Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 12 Jun 2016 01:42:59 +0000 (18:42 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 12 Jun 2016 01:42:59 +0000 (18:42 -0700)
Pull block layer fixes from Jens Axboe:
 "A small collection of fixes for the current series.  This contains:

   - Two fixes for xen-blkfront, from Bob Liu.

   - A bug fix for NVMe, releasing only the specific resources we
     requested.

   - Fix for a debugfs flags entry for nbd, from Josef.

   - Plug fix from Omar, fixing up a case of code being switched between
     two functions.

   - A missing bio_put() for the new discard callers of
     submit_bio_wait(), fixing a regression causing a leak of the bio.
     From Shaun.

   - Improve dirty limit calculation precision in the writeback code,
     fixing a case where setting a limit lower than 1% of memory would
     end up being zero.  From Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
  NVMe: Only release requested regions
  xen-blkfront: fix resume issues after a migration
  xen-blkfront: don't call talk_to_blkback when already connected to blkback
  nbd: pass the nbd pointer for flags debugfs
  block: missing bio_put following submit_bio_wait
  blk-mq: really fix plug list flushing for nomerge queues
  writeback: use higher precision calculation in domain_dirty_limits()

block/blk-lib.c
block/blk-mq.c
drivers/block/nbd.c
drivers/block/xen-blkfront.c
drivers/nvme/host/pci.c
mm/page-writeback.c

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 23d7f301a1967483ec79a383a1a317881caf3358..9e29dc35169560a223ae7eea9dffc80da15fc7f9 100644
@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                ret = submit_bio_wait(type, bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
+               bio_put(bio);
        }
        blk_finish_plug(&plug);
 
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                }
        }
 
-       if (bio)
+       if (bio) {
                ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+               bio_put(bio);
+       }
        return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                }
        }
 
-       if (bio)
-               return submit_bio_wait(WRITE, bio);
+       if (bio) {
+               ret = submit_bio_wait(WRITE, bio);
+               bio_put(bio);
+               return ret;
+       }
        return 0;
 }
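For context on the three hunks above: submit_bio_wait() does not consume the caller's reference to the bio, so every submit-and-wait caller must drop that reference once the wait returns. A minimal sketch of the corrected pattern, assuming the 4.7-era API shown in the diff, with bdev, sector, type and ret taken from the surrounding caller:

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        /* submit_bio_wait() blocks until the bio completes but leaves
         * our reference intact; without bio_put() the bio is leaked. */
        ret = submit_bio_wait(type, bio);
        bio_put(bio);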
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29cbc1b5fbdba0ee89874c662ab441b2a817e177..f9b9049b1284cc8adf65c1eed611e4c09d2d6584 100644
@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q)) {
-               if (blk_attempt_plug_merge(q, bio, &request_count,
-                                          &same_queue_rq))
-                       return BLK_QC_T_NONE;
-       } else
-               request_count = blk_plug_queued_count(q);
+       if (!is_flush_fua && !blk_queue_nomerges(q) &&
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+               return BLK_QC_T_NONE;
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return BLK_QC_T_NONE;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return BLK_QC_T_NONE;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
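Omar's fix swaps the two bodies back: the nomerge handling belongs in blk_sq_make_request(), not blk_mq_make_request(). A condensed sketch of why the single-queue path still needs request_count when merging is disabled (assumption: simplified from the 4.7 blk_sq_make_request(), where BLK_MAX_REQUEST_COUNT is the existing plug-depth limit used later in the function):

        if (!is_flush_fua && !blk_queue_nomerges(q)) {
                if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
                        return BLK_QC_T_NONE;   /* merged into a plugged rq */
        } else
                request_count = blk_plug_queued_count(q);  /* count by hand */

        /* later in the same function, the count bounds the plug depth: */
        if (request_count >= BLK_MAX_REQUEST_COUNT)
                blk_flush_plug_list(plug, false);

With the buggy placement, nomerge queues left request_count at zero, so the plug list was never flushed on depth.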
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 31e73a7a40f20eb6de0337c79aade131c4b70df4..6a48ed41963ff9d215665b1423d20567fc50ee06 100644
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
        debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-       debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+       debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
        return 0;
 }
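The one-character change above matters because debugfs_create_file() stashes its data argument (in inode->i_private) for use whenever the file is later opened; passing &nbd stored the address of a stack variable that is gone by then. A sketch of how the pointer comes back out, assuming a simplified open helper in the style of nbd.c:

        static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
        {
                /* i_private is exactly the data pointer handed to
                 * debugfs_create_file(); it must be the nbd_device
                 * itself, not the address of a dead local variable. */
                return single_open(file, nbd_dbg_flags_show, inode->i_private);
        }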
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ca13df8546396cf6a7dd72c3a288f27623681baf..2e6d1e9c3345f190b01f9d9caf8d27e6f5f8b9ce 100644
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *qd)
 {
        unsigned long flags;
-       struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+       int qid = hctx->queue_num;
+       struct blkfront_info *info = hctx->queue->queuedata;
+       struct blkfront_ring_info *rinfo = NULL;
 
+       BUG_ON(info->nr_rings <= qid);
+       rinfo = &info->rinfo[qid];
        blk_mq_start_request(qd->rq);
        spin_lock_irqsave(&rinfo->ring_lock, flags);
        if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ out_busy:
        return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-                           unsigned int index)
-{
-       struct blkfront_info *info = (struct blkfront_info *)data;
-
-       BUG_ON(info->nr_rings <= index);
-       hctx->driver_data = &info->rinfo[index];
-       return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
        .queue_rq = blkif_queue_rq,
        .map_queue = blk_mq_map_queue,
-       .init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
                return PTR_ERR(rq);
        }
 
+       rq->queuedata = info;
        queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
        if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
                return err;
 
        err = talk_to_blkback(dev, info);
+       if (!err)
+               blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 
        /*
         * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateConnected:
-               if (dev->state != XenbusStateInitialised) {
+               /*
+                * talk_to_blkback sets state to XenbusStateInitialised
+                * and blkfront_connect sets it to XenbusStateConnected
+                * (if connection went OK).
+                *
+                * If the backend (or toolstack) decides to poke at backend
+                * state (and re-trigger the watch by setting the state repeatedly
+                * to XenbusStateConnected (4)) we need to deal with this.
+                * This is allowed as this is used to communicate to the guest
+                * that the size of disk has changed!
+                */
+               if ((dev->state != XenbusStateInitialised) &&
+                   (dev->state != XenbusStateConnected)) {
                        if (talk_to_blkback(dev, info))
                                break;
                }
+
                blkfront_connect(info);
                break;
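Taken together, the hunks above let a migrated frontend renegotiate its ring count safely. A condensed sketch of the ordering, assuming it is distilled from blkfront_resume() and the reworked blkif_queue_rq():

        /* resume path: renegotiate with the new backend, then resize */
        err = talk_to_blkback(dev, info);       /* may change info->nr_rings */
        if (!err)
                blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

        /* queue_rq path: resolve the ring from queuedata on every request,
         * so nothing cached at init_hctx time can go stale across resume */
        struct blkfront_info *qinfo = hctx->queue->queuedata;
        struct blkfront_ring_info *rinfo = &qinfo->rinfo[hctx->queue_num];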
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 78dca3193ca4ccc61df9784ff7699834c61e08b2..befac5b19490ee91e13dec00c471931005b871b7 100644
@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+       int bars;
+
        if (dev->bar)
                iounmap(dev->bar);
-       pci_release_regions(to_pci_dev(dev->dev));
+
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       pci_release_selected_regions(pdev, bars);
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
        return 0;
   release:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, bars);
        return -ENODEV;
 }
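The two hunks above make teardown release exactly the memory BARs that setup requested, rather than every region on the device. A minimal sketch of the symmetric pairing, assuming it is condensed from nvme_dev_map()/nvme_dev_unmap():

        int bars = pci_select_bars(pdev, IORESOURCE_MEM);  /* memory BARs only */

        if (pci_request_selected_regions(pdev, bars, "nvme"))
                return -ENODEV;
        /* ... ioremap and use the BARs ... */
        pci_release_selected_regions(pdev, bars);  /* release only what we took */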
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b9956fdee8f5d0ed55713eef51e13a6ea42b2e0d..e2481949494c4e6bf4cd0fd0799d178bd2203008 100644
@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
        unsigned long bytes = vm_dirty_bytes;
        unsigned long bg_bytes = dirty_background_bytes;
-       unsigned long ratio = vm_dirty_ratio;
-       unsigned long bg_ratio = dirty_background_ratio;
+       /* convert ratios to per-PAGE_SIZE for higher precision */
+       unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+       unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
        unsigned long thresh;
        unsigned long bg_thresh;
        struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
                /*
                 * The byte settings can't be applied directly to memcg
                 * domains.  Convert them to ratios by scaling against
-                * globally available memory.
+                * globally available memory.  As the ratios are in
+                * per-PAGE_SIZE, they can be obtained by dividing bytes by
+                * number of pages.
                 */
                if (bytes)
-                       ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
-                                   global_avail, 100UL);
+                       ratio = min(DIV_ROUND_UP(bytes, global_avail),
+                                   PAGE_SIZE);
                if (bg_bytes)
-                       bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
-                                      global_avail, 100UL);
+                       bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+                                      PAGE_SIZE);
                bytes = bg_bytes = 0;
        }
 
        if (bytes)
                thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
        else
-               thresh = (ratio * available_memory) / 100;
+               thresh = (ratio * available_memory) / PAGE_SIZE;
 
        if (bg_bytes)
                bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
        else
-               bg_thresh = (bg_ratio * available_memory) / 100;
+               bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
        if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
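A worked example of the precision gain, assuming PAGE_SIZE = 4096, a domain with available_memory = global_avail = 1,048,576 pages (4 GiB), and a memcg dirty limit of bytes = 4 MiB:

        /* old:  ratio  = DIV_ROUND_UP(4 MiB, PAGE_SIZE) * 100 / global_avail
         *              = 1024 * 100 / 1048576           = 0  -> thresh = 0
         * new:  ratio  = DIV_ROUND_UP(4 MiB, global_avail)
         *              = DIV_ROUND_UP(4194304, 1048576) = 4  (per-PAGE_SIZE units)
         *       thresh = ratio * available_memory / PAGE_SIZE
         *              = 4 * 1048576 / 4096             = 1024 pages = 4 MiB
         */

Resolution improves from 1/100 of memory to 1/PAGE_SIZE of it, so limits below 1% no longer collapse to a zero threshold.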