blk-mq: move hctx lock/unlock into a helper

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 63fb971d65745ac0621c69b6bc22ad5b0b76dd84..012425f5bd8c5e01e22e1cd2a146221f4e25a260 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
-       unsigned int granularity;
        unsigned int op;
-       int alignment;
        sector_t bs_mask;
 
        if (!q)
@@ -51,30 +49,15 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
 
-       /* Zero-sector (unknown) and one-sector granularities are the same.  */
-       granularity = max(q->limits.discard_granularity >> 9, 1U);
-       alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
        while (nr_sects) {
-               unsigned int req_sects;
-               sector_t end_sect, tmp;
+               unsigned int req_sects = nr_sects;
+               sector_t end_sect;
 
-               /* Make sure bi_size doesn't overflow */
-               req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+               req_sects = min(req_sects, bio_allowed_max_sectors(q));
+               if (!req_sects)
+                       goto fail;
 
-               /**
-                * If splitting a request, and the next starting sector would be
-                * misaligned, stop the discard at the previous aligned sector.
-                */
                end_sect = sector + req_sects;
-               tmp = end_sect;
-               if (req_sects < nr_sects &&
-                   sector_div(tmp, granularity) != alignment) {
-                       end_sect = end_sect - alignment;
-                       sector_div(end_sect, granularity);
-                       end_sect = end_sect * granularity + alignment;
-                       req_sects = end_sect - sector;
-               }
 
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
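The removed granularity/alignment bookkeeping is no longer needed here: the block layer's bio splitting already honors the queue's discard limits, so this loop only has to keep each bio's bi_size from overflowing. The bio_allowed_max_sectors() helper replaces the bare UINT_MAX >> 9 cap so that each chunk stays a whole number of logical blocks; it lives in block/blk.h and is roughly:

    static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
    {
            /*
             * Largest bi_size, expressed in 512-byte sectors, that is still
             * a whole number of logical blocks: round UINT_MAX bytes down
             * to the logical block size before converting to sectors.
             */
            return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
    }

With 4096-byte logical blocks this yields 8388600 sectors rather than UINT_MAX >> 9 = 8388607, which is not a multiple of 8 and would leave a misaligned tail when a large discard is split. The same cap replaces UINT_MAX >> 9 in __blkdev_issue_write_same() below.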
@@ -96,6 +79,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        *biop = bio;
        return 0;
+
+fail:
+       if (bio) {
+               submit_bio_wait(bio);
+               bio_put(bio);
+       }
+       *biop = NULL;
+       return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
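The new fail path cannot simply return on a zero-sized chunk: earlier iterations may already have chained bios onto *biop, so those are submitted and waited for before -EOPNOTSUPP is reported and *biop is cleared. A minimal sketch of how a caller consumes this chained-bio API, modeled on blkdev_issue_discard() in this file (simplified):

    int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                    sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
    {
            struct bio *bio = NULL;
            struct blk_plug plug;
            int ret;

            blk_start_plug(&plug);
            ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
                            flags, &bio);
            if (!ret && bio) {
                    /* Wait on the last bio; it completes after the chain. */
                    ret = submit_bio_wait(bio);
                    bio_put(bio);
            }
            blk_finish_plug(&plug);

            return ret;
    }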
 
@@ -164,7 +155,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                return -EOPNOTSUPP;
 
        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
-       max_write_same_sectors = UINT_MAX >> 9;
+       max_write_same_sectors = bio_allowed_max_sectors(q);
 
        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
@@ -275,6 +266,40 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
        return min(pages, (sector_t)BIO_MAX_PAGES);
 }
 
+static int __blkdev_issue_zero_pages(struct block_device *bdev,
+               sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+               struct bio **biop)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+       struct bio *bio = *biop;
+       int bi_size = 0;
+       unsigned int sz;
+
+       if (!q)
+               return -ENXIO;
+
+       while (nr_sects != 0) {
+               bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
+                              gfp_mask);
+               bio->bi_iter.bi_sector = sector;
+               bio_set_dev(bio, bdev);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+               while (nr_sects != 0) {
+                       sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
+                       bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
+                       nr_sects -= bi_size >> 9;
+                       sector += bi_size >> 9;
+                       if (bi_size < sz)
+                               break;
+               }
+               cond_resched();
+       }
+
+       *biop = bio;
+       return 0;
+}
+
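The factored-out helper repeatedly adds the shared zero page to a bio until bio_add_page() accepts less than was asked for (the bio is full), then chains a fresh bio sized by __blkdev_sectors_to_bio_pages(), whose tail appears as context above. For reference, that helper's full definition in this file is roughly:

    static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
    {
            /* Pages needed to cover nr_sects, capped at one bio's worth. */
            sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

            return min(pages, (sector_t)BIO_MAX_PAGES);
    }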
 /**
  * __blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:      blockdev to issue
@@ -288,12 +313,6 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
  *  Zero-fill a block range, either using hardware offload or by explicitly
  *  writing zeroes to the device.
  *
- *  Note that this function may fail with -EOPNOTSUPP if the driver signals
- *  zeroing offload support, but the device fails to process the command (for
- *  some devices there is no non-destructive way to verify whether this
- *  operation is actually supported).  In this case the caller should call
- *  retry the call to blkdev_issue_zeroout() and the fallback path will be used.
- *
  *  If a device is using logical block provisioning, the underlying space will
  *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
  *
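The removed caveat is not lost: the warning about devices that advertise zeroing offload but then reject the command reappears as a comment inside blkdev_issue_zeroout() below, which now retries with the page-backed fallback internally instead of asking callers to retry.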
@@ -305,9 +324,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                unsigned flags)
 {
        int ret;
-       int bi_size = 0;
-       struct bio *bio = *biop;
-       unsigned int sz;
        sector_t bs_mask;
 
        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
@@ -317,30 +333,10 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
-               goto out;
-
-       ret = 0;
-       while (nr_sects != 0) {
-               bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
-                              gfp_mask);
-               bio->bi_iter.bi_sector = sector;
-               bio_set_dev(bio, bdev);
-               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
-               while (nr_sects != 0) {
-                       sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
-                       bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
-                       nr_sects -= bi_size >> 9;
-                       sector += bi_size >> 9;
-                       if (bi_size < sz)
-                               break;
-               }
-               cond_resched();
-       }
+               return ret;
 
-       *biop = bio;
-out:
-       return ret;
+       return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+                                        biop);
 }
 EXPORT_SYMBOL(__blkdev_issue_zeroout);
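__blkdev_issue_zeroout() is now a thin dispatcher: try the WRITE ZEROES offload, and fall back to the zero-page loop unless BLKDEV_ZERO_NOFALLBACK forbids it. The offload attempt fails fast when the queue advertises no capability; for reference, __blkdev_issue_write_zeroes() earlier in this file looks roughly like:

    static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                    sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                    struct bio **biop, unsigned flags)
    {
            struct bio *bio = *biop;
            unsigned int max_write_zeroes_sectors;
            struct request_queue *q = bdev_get_queue(bdev);

            if (!q)
                    return -ENXIO;

            /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
            max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

            if (max_write_zeroes_sectors == 0)
                    return -EOPNOTSUPP;

            while (nr_sects) {
                    bio = next_bio(bio, 0, gfp_mask);
                    bio->bi_iter.bi_sector = sector;
                    bio_set_dev(bio, bdev);
                    bio->bi_opf = REQ_OP_WRITE_ZEROES;
                    if (flags & BLKDEV_ZERO_NOUNMAP)
                            bio->bi_opf |= REQ_NOUNMAP;

                    if (nr_sects > max_write_zeroes_sectors) {
                            bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                            nr_sects -= max_write_zeroes_sectors;
                            sector += max_write_zeroes_sectors;
                    } else {
                            bio->bi_iter.bi_size = nr_sects << 9;
                            nr_sects = 0;
                    }
                    cond_resched();
            }

            *biop = bio;
            return 0;
    }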
 
@@ -360,18 +356,49 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout);
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
 {
-       int ret;
-       struct bio *bio = NULL;
+       int ret = 0;
+       sector_t bs_mask;
+       struct bio *bio;
        struct blk_plug plug;
+       bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
 
+       bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+       if ((sector | nr_sects) & bs_mask)
+               return -EINVAL;
+
+retry:
+       bio = NULL;
        blk_start_plug(&plug);
-       ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
-                       &bio, flags);
+       if (try_write_zeroes) {
+               ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+                                                 gfp_mask, &bio, flags);
+       } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
+               ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
+                                               gfp_mask, &bio);
+       } else {
+               /* No zeroing offload support */
+               ret = -EOPNOTSUPP;
+       }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
+       if (ret && try_write_zeroes) {
+               if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
+                       try_write_zeroes = false;
+                       goto retry;
+               }
+               if (!bdev_write_zeroes_sectors(bdev)) {
+                       /*
+                        * Zeroing offload support was indicated, but the
+                        * device reported ILLEGAL REQUEST (for some devices
+                        * there is no non-destructive way to verify whether
+                        * WRITE ZEROES is actually supported).
+                        */
+                       ret = -EOPNOTSUPP;
+               }
+       }
 
        return ret;
 }
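The retry makes the fallback transparent: a device that claims WRITE ZEROES support but rejects the command gets one retry through the page-backed path, and -EOPNOTSUPP only escapes when no fallback is permitted or possible. A hypothetical caller sketch (zero_fill_range() and the 1 MiB size are illustrative, not from this file):

    /*
     * Zero 1 MiB at byte offset 'pos', keeping the range provisioned
     * (no unmap). Blocks until the writes complete.
     */
    static int zero_fill_range(struct block_device *bdev, loff_t pos)
    {
            return blkdev_issue_zeroout(bdev, pos >> 9, SZ_1M >> 9,
                                        GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
    }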