dm: support REQ_OP_WRITE_ZEROES
author    Christoph Hellwig <hch@lst.de>
          Wed, 5 Apr 2017 17:21:05 +0000 (19:21 +0200)
committer Jens Axboe <axboe@fb.com>
          Sat, 8 Apr 2017 17:25:38 +0000 (11:25 -0600)
Copy & paste from the REQ_OP_WRITE_SAME code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/md/dm-core.h
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-mpath.c
drivers/md/dm-rq.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm.c
include/linux/device-mapper.h

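As the commit message says, this mirrors the existing WRITE SAME plumbing: each target advertises how many WRITE ZEROES bios it wants per mapped region through the new num_write_zeroes_bios field, dm core clones and remaps those bios like any other changing-extent operation, and dm-table.c only leaves max_write_zeroes_sectors non-zero when every target opted in and every underlying device reports a non-zero limit. As a rough sketch of the opt-in (not part of this commit; the target name is hypothetical, modelled on the dm-linear and dm-stripe constructors below), a bio-based target's constructor would simply set the new field:

	#include <linux/device-mapper.h>

	/* Sketch only: a hypothetical bio-based target's constructor opting in to
	 * WRITE ZEROES, modelled on the dm-linear/dm-stripe changes in this commit. */
	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		ti->num_flush_bios = 1;
		ti->num_discard_bios = 1;
		ti->num_write_same_bios = 1;
		/* Opt in: dm core will now clone WRITE ZEROES bios to this target.
		 * Without it, dm_table_supports_write_zeroes() returns false and
		 * dm_table_set_restrictions() zeroes max_write_zeroes_sectors. */
		ti->num_write_zeroes_bios = 1;
		return 0;
	}

The error path keeps the same self-disabling behaviour as WRITE SAME: a -EREMOTEIO completion for a WRITE ZEROES bio or request calls the new disable_write_zeroes() helper, which zeroes the queue limit so the block layer stops issuing the op to that device.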
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 136fda3ff9e55d46a2ef686e8655ba50754d5c20..fea5bd52ada8fa6c01cf57fc681746854c635569 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -132,6 +132,7 @@ void dm_init_md_queue(struct mapped_device *md);
 void dm_init_normal_md_queue(struct mapped_device *md);
 int md_in_flight(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
+void disable_write_zeroes(struct mapped_device *md);
 
 static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
 {
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b808cbe22678b121ed2f12e3bdd7068f3b8cb27b..3702e502466d37a902c64a74a1f5ad7b516770bb 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -312,9 +312,12 @@ static void do_region(int op, int op_flags, unsigned region,
         */
        if (op == REQ_OP_DISCARD)
                special_cmd_max_sectors = q->limits.max_discard_sectors;
+       else if (op == REQ_OP_WRITE_ZEROES)
+               special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
        else if (op == REQ_OP_WRITE_SAME)
                special_cmd_max_sectors = q->limits.max_write_same_sectors;
-       if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+       if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
+            op == REQ_OP_WRITE_SAME)  &&
            special_cmd_max_sectors == 0) {
                dec_count(io, region, -EOPNOTSUPP);
                return;
@@ -330,6 +333,7 @@ static void do_region(int op, int op_flags, unsigned region,
                 */
                switch (op) {
                case REQ_OP_DISCARD:
+               case REQ_OP_WRITE_ZEROES:
                        num_bvecs = 0;
                        break;
                case REQ_OP_WRITE_SAME:
@@ -347,7 +351,7 @@ static void do_region(int op, int op_flags, unsigned region,
                bio_set_op_attrs(bio, op, op_flags);
                store_io_and_region_in_bio(bio, io, region);
 
-               if (op == REQ_OP_DISCARD) {
+               if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
                        num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
                        bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4788b0b989a9bac661f07a8deb2c7a86a96c8677..e17fd44ceef534352dfd2b9b15d787399bbaf065 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -59,6 +59,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_same_bios = 1;
+       ti->num_write_zeroes_bios = 1;
        ti->private = lc;
        return 0;
 
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7f223dbed49f61d5797d4edba728989c9e435d3d..ab55955ed704904d7cdabfd2fd06e8762fe6474b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1103,6 +1103,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_same_bios = 1;
+       ti->num_write_zeroes_bios = 1;
        if (m->queue_mode == DM_TYPE_BIO_BASED)
                ti->per_io_data_size = multipath_per_bio_data_size();
        else
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index d19af1d21f4c4d29dc97b2d34ea00c180f6884b4..e60f1b6845be12fbb5c99caba22a40776cc0d892 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -298,9 +298,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }
 
-       if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
-                    !clone->q->limits.max_write_same_sectors))
-               disable_write_same(tio->md);
+       if (unlikely(r == -EREMOTEIO)) {
+               if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                   !clone->q->limits.max_write_same_sectors)
+                       disable_write_same(tio->md);
+               if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                   !clone->q->limits.max_write_zeroes_sectors)
+                       disable_write_zeroes(tio->md);
+       }
 
        if (r <= 0)
                /* The target wants to complete the I/O */
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 28193a57bf471e30da0bedd944fe148283584b9c..5ef49c121d9955dfafaf7b4ba30c50f3449c1df0 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -169,6 +169,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->num_flush_bios = stripes;
        ti->num_discard_bios = stripes;
        ti->num_write_same_bios = stripes;
+       ti->num_write_zeroes_bios = stripes;
 
        sc->chunk_size = chunk_size;
        if (chunk_size & (chunk_size - 1))
@@ -293,6 +294,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
        if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
+           unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
            unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
                target_bio_nr = dm_bio_get_target_bio_nr(bio);
                BUG_ON(target_bio_nr >= sc->stripes);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ad16d9c9d5aae956afc7b3c7ab820b4f52de4ed..5cd665c91ead89f3fd82176cae0d94f425973325 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1533,6 +1533,34 @@ static bool dm_table_supports_write_same(struct dm_table *t)
        return true;
 }
 
+static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
+                                          sector_t start, sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && !q->limits.max_write_zeroes_sectors;
+}
+
+static bool dm_table_supports_write_zeroes(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i = 0;
+
+       while (i < dm_table_get_num_targets(t)) {
+               ti = dm_table_get_target(t, i++);
+
+               if (!ti->num_write_zeroes_bios)
+                       return false;
+
+               if (!ti->type->iterate_devices ||
+                   ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
+                       return false;
+       }
+
+       return true;
+}
+
+
 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
 {
@@ -1603,6 +1631,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (!dm_table_supports_write_same(t))
                q->limits.max_write_same_sectors = 0;
+       if (!dm_table_supports_write_zeroes(t))
+               q->limits.max_write_zeroes_sectors = 0;
 
        if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
                queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index cd93a3b9cecacde1b38b2052666e9c06f5e46966..8bf397729bbd28a964f58fe3bba3ca885bfc08ab 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -824,6 +824,14 @@ void disable_write_same(struct mapped_device *md)
        limits->max_write_same_sectors = 0;
 }
 
+void disable_write_zeroes(struct mapped_device *md)
+{
+       struct queue_limits *limits = dm_get_queue_limits(md);
+
+       /* device doesn't really support WRITE ZEROES, disable it */
+       limits->max_write_zeroes_sectors = 0;
+}
+
 static void clone_endio(struct bio *bio)
 {
        int error = bio->bi_error;
@@ -850,9 +858,14 @@ static void clone_endio(struct bio *bio)
                }
        }
 
-       if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
-                    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
-               disable_write_same(md);
+       if (unlikely(r == -EREMOTEIO)) {
+               if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+                   !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+                       disable_write_same(md);
+               if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                   !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+                       disable_write_zeroes(md);
+       }
 
        free_tio(tio);
        dec_pending(io, error);
@@ -1201,6 +1214,11 @@ static unsigned get_num_write_same_bios(struct dm_target *ti)
        return ti->num_write_same_bios;
 }
 
+static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
+{
+       return ti->num_write_zeroes_bios;
+}
+
 typedef bool (*is_split_required_fn)(struct dm_target *ti);
 
 static bool is_split_required_for_discard(struct dm_target *ti)
@@ -1255,6 +1273,11 @@ static int __send_write_same(struct clone_info *ci)
        return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
+static int __send_write_zeroes(struct clone_info *ci)
+{
+       return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
+}
+
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
@@ -1269,6 +1292,8 @@ static int __split_and_process_non_flush(struct clone_info *ci)
                return __send_discard(ci);
        else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
                return __send_write_same(ci);
+       else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+               return __send_write_zeroes(ci);
 
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a7e6903866fdc98689bb5189cffd4b8cb7a715fa..3829bee2302a47f3f7772ff377c51e981eb6dfa5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -254,6 +254,12 @@ struct dm_target {
         */
        unsigned num_write_same_bios;
 
+       /*
+        * The number of WRITE ZEROES bios that will be submitted to the target.
+        * The bio number can be accessed with dm_bio_get_target_bio_nr.
+        */
+       unsigned num_write_zeroes_bios;
+
        /*
         * The minimum number of extra bytes allocated in each io for the
         * target to use.
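
With these hooks in place, a WRITE ZEROES bio submitted to a dm device is either cloned to each target (bio-based) or passed through (request-based), and the op disables itself on -EREMOTEIO as described above. One way to exercise the path from user space is the BLKZEROOUT ioctl, which the block layer services with REQ_OP_WRITE_ZEROES when the queue advertises a non-zero write_zeroes_max_bytes and by writing zero pages otherwise. The sketch below is not part of this commit; the device path is a placeholder and it assumes a kernel carrying this series:

	/* Sketch: zero the first 1 MiB of a dm device via BLKZEROOUT.
	 * /dev/mapper/example is a placeholder device path. */
	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t range[2] = { 0, 1024 * 1024 };	/* byte offset, byte length */
		int fd = open("/dev/mapper/example", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKZEROOUT, range) < 0) {
			perror("BLKZEROOUT");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}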