md: don't use flush_signals in userspace processes
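
The hunks below drop flush_signals(current) from raid5_make_request() and
instead block every signal across the schedule() call, so a userspace
process's pending signals are kept rather than silently discarded while the
request path waits out a suspended range.  A minimal sketch of that pattern,
condensed from the hunk further down and assuming the in-kernel
sigfillset()/sigprocmask() helpers from <linux/signal.h>:

	sigset_t full, old;

	sigfillset(&full);                     /* build a mask containing every signal       */
	sigprocmask(SIG_BLOCK, &full, &old);   /* block them all, remembering the old mask   */
	schedule();                            /* sleep without being woken up by signals    */
	sigprocmask(SIG_SETMASK, &old, NULL);  /* restore the mask; the signals stay pending */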

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c4f7659f8b1337c99cfd0ab5070012e3f658849..547d5fa45a42d475721df159c261911ca344be66 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
                        set_bit(STRIPE_INSYNC, &sh->state);
                else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                sh->check_state = check_state_compute_run;
                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
                        }
                } else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                int *target = &sh->ops.target;
 
                                sh->ops.target = -1;
@@ -5469,7 +5479,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
        last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
        bi->bi_next = NULL;
-       md_write_start(mddev, bi);
 
        stripe_sectors = conf->chunk_sectors *
                (conf->raid_disks - conf->max_degraded);
@@ -5539,11 +5548,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                release_stripe_plug(mddev, sh);
        }
 
-       md_write_end(mddev);
        bio_endio(bi);
 }
 
-static void raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
        struct r5conf *conf = mddev->private;
        int dd_idx;
@@ -5559,10 +5567,10 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                int ret = r5l_handle_flush_request(conf->log, bi);
 
                if (ret == 0)
-                       return;
+                       return true;
                if (ret == -ENODEV) {
                        md_flush_request(mddev, bi);
-                       return;
+                       return true;
                }
                /* ret == -EAGAIN, fallback */
                /*
@@ -5572,6 +5580,8 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                do_flush = bi->bi_opf & REQ_PREFLUSH;
        }
 
+       if (!md_write_start(mddev, bi))
+               return false;
        /*
         * If array is degraded, better not do chunk aligned read because
         * later we might have to read it again in order to reconstruct
@@ -5581,18 +5591,18 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
            mddev->reshape_position == MaxSector) {
                bi = chunk_aligned_read(mddev, bi);
                if (!bi)
-                       return;
+                       return true;
        }
 
        if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
                make_discard_request(mddev, bi);
-               return;
+               md_write_end(mddev);
+               return true;
        }
 
        logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
-       md_write_start(mddev, bi);
 
        prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
@@ -5683,12 +5693,15 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                                 * userspace, we want an interruptible
                                 * wait.
                                 */
-                               flush_signals(current);
                                prepare_to_wait(&conf->wait_for_overlap,
                                                &w, TASK_INTERRUPTIBLE);
                                if (logical_sector >= mddev->suspend_lo &&
                                    logical_sector < mddev->suspend_hi) {
+                                       sigset_t full, old;
+                                       sigfillset(&full);
+                                       sigprocmask(SIG_BLOCK, &full, &old);
                                        schedule();
+                                       sigprocmask(SIG_SETMASK, &old, NULL);
                                        do_prepare = true;
                                }
                                goto retry;
@@ -5730,6 +5743,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
        if (rw == WRITE)
                md_write_end(mddev);
        bio_endio(bi);
+       return true;
 }
 
 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
@@ -7108,6 +7122,9 @@ static int raid5_run(struct mddev *mddev)
        long long min_offset_diff = 0;
        int first = 1;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->recovery_cp != MaxSector)
                pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
                          mdname(mddev));