md/raid5: Refactor add_stripe_bio()
author    Logan Gunthorpe <logang@deltatee.com>
Thu, 16 Jun 2022 19:19:41 +0000 (13:19 -0600)
committer Jens Axboe <axboe@kernel.dk>
Tue, 2 Aug 2022 23:14:42 +0000 (17:14 -0600)
Factor out two helper functions from add_stripe_bio(): one to check for
overlap (stripe_bio_overlaps()), and one to actually add the bio to the
stripe (__add_stripe_bio()). The latter function will always succeed.

This will be useful in the next patch so that overlap can be checked for
multiple disks before adding any bios.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
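
For context, the split enables a check-then-commit pattern under sh->stripe_lock:
call stripe_bio_overlaps() for every disk of interest first, and only once all
checks pass call __add_stripe_bio(), which cannot fail. The sketch below is a
minimal illustration of that usage, not part of this patch; the helper name
add_bios_to_stripe() and its bios[]/first_disk/last_disk parameters are
hypothetical.

static bool add_bios_to_stripe(struct stripe_head *sh, struct bio **bios,
                               int first_disk, int last_disk, int forwrite,
                               int previous)
{
        int d;

        spin_lock_irq(&sh->stripe_lock);

        /* Pass 1: only check; nothing is modified, so bailing out is safe. */
        for (d = first_disk; d < last_disk; d++) {
                if (!bios[d])
                        continue;
                if (stripe_bio_overlaps(sh, bios[d], d, forwrite)) {
                        set_bit(R5_Overlap, &sh->dev[d].flags);
                        spin_unlock_irq(&sh->stripe_lock);
                        return false;
                }
        }

        /* Pass 2: every check passed, so each __add_stripe_bio() succeeds. */
        for (d = first_disk; d < last_disk; d++)
                if (bios[d])
                        __add_stripe_bio(sh, bios[d], d, forwrite, previous);

        spin_unlock_irq(&sh->stripe_lock);
        return true;
}

Because __add_stripe_bio() always succeeds, no partially added bios ever need
to be rolled back once the overlap checks pass.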
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 200dc64fa3dc6ddd1ce16189a7ab7033e55bb0a4..f243043274eacbac411bede21bab20b04a7c2c7b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3416,39 +3416,32 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                s->locked, s->ops_request);
 }
 
-/*
- * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
- * The bi_next chain must be in order.
- */
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
-                         int forwrite, int previous)
+static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
+                               int dd_idx, int forwrite)
 {
-       struct bio **bip;
        struct r5conf *conf = sh->raid_conf;
-       int firstwrite=0;
+       struct bio **bip;
 
-       pr_debug("adding bi b#%llu to stripe s#%llu\n",
-               (unsigned long long)bi->bi_iter.bi_sector,
-               (unsigned long long)sh->sector);
+       pr_debug("checking bi b#%llu to stripe s#%llu\n",
+                bi->bi_iter.bi_sector, sh->sector);
 
-       spin_lock_irq(&sh->stripe_lock);
        /* Don't allow new IO added to stripes in batch list */
        if (sh->batch_head)
-               goto overlap;
-       if (forwrite) {
+               return true;
+
+       if (forwrite)
                bip = &sh->dev[dd_idx].towrite;
-               if (*bip == NULL)
-                       firstwrite = 1;
-       } else
+       else
                bip = &sh->dev[dd_idx].toread;
+
        while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
                if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
-                       goto overlap;
-               bip = & (*bip)->bi_next;
+                       return true;
+               bip = &(*bip)->bi_next;
        }
+
        if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
-               goto overlap;
+               return true;
 
        if (forwrite && raid5_has_ppl(conf)) {
                /*
@@ -3477,9 +3470,30 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
                }
 
                if (first + conf->chunk_sectors * (count - 1) != last)
-                       goto overlap;
+                       return true;
        }
 
+       return false;
+}
+
+static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+                            int dd_idx, int forwrite, int previous)
+{
+       struct r5conf *conf = sh->raid_conf;
+       struct bio **bip;
+       int firstwrite = 0;
+
+       if (forwrite) {
+               bip = &sh->dev[dd_idx].towrite;
+               if (!*bip)
+                       firstwrite = 1;
+       } else {
+               bip = &sh->dev[dd_idx].toread;
+       }
+
+       while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
+               bip = &(*bip)->bi_next;
+
        if (!forwrite || previous)
                clear_bit(STRIPE_BATCH_READY, &sh->state);
 
@@ -3506,8 +3520,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
        }
 
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-               (unsigned long long)(*bip)->bi_iter.bi_sector,
-               (unsigned long long)sh->sector, dd_idx);
+                (*bip)->bi_iter.bi_sector, sh->sector, dd_idx);
 
        if (conf->mddev->bitmap && firstwrite) {
                /* Cannot hold spinlock over bitmap_startwrite,
@@ -3515,7 +3528,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
                 * we have added to the bitmap and set bm_seq.
                 * So set STRIPE_BITMAP_PENDING to prevent
                 * batching.
-                * If multiple add_stripe_bio() calls race here they
+                * If multiple __add_stripe_bio() calls race here they
                 * much all set STRIPE_BITMAP_PENDING.  So only the first one
                 * to complete "bitmap_startwrite" gets to set
                 * STRIPE_BIT_DELAY.  This is important as once a stripe
@@ -3533,14 +3546,27 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
                        set_bit(STRIPE_BIT_DELAY, &sh->state);
                }
        }
-       spin_unlock_irq(&sh->stripe_lock);
+}
 
-       return 1;
+/*
+ * Each stripe/dev can have one or more bios attached.
+ * toread/towrite point to the first in a chain.
+ * The bi_next chain must be in order.
+ */
+static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+                          int dd_idx, int forwrite, int previous)
+{
+       spin_lock_irq(&sh->stripe_lock);
 
- overlap:
-       set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+       if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+               set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+               spin_unlock_irq(&sh->stripe_lock);
+               return false;
+       }
+
+       __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
        spin_unlock_irq(&sh->stripe_lock);
-       return 0;
+       return true;
 }
 
 static void end_reshape(struct r5conf *conf);