md/raid1: store behind-write pages in bi_vecs.
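
Previously the r1bio kept only an array of bare page pointers for the
write-behind copies of the data; it now stores complete bio_vecs (page,
offset, length), which gives the r1bio enough information to rebuild a bio
later (e.g. when retrying after a write error).  As a rough illustration of
the copy step performed by alloc_behind_pages() in the diff below, here is a
userspace sketch; struct sketch_vec and copy_behind_vecs() are made-up
stand-ins for the kernel's struct bio_vec handling and are not part of the
patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

/* Simplified stand-in for struct bio_vec: page pointer, length, offset. */
struct sketch_vec {
	unsigned char *page;        /* the kernel stores a struct page * here */
	unsigned int   len;
	unsigned int   offset;
};

/*
 * Duplicate cnt segments: keep each segment's offset and length but point
 * it at a freshly allocated page holding a private copy of the data, which
 * is the idea behind alloc_behind_pages() for write-behind.  Returns NULL
 * if any allocation fails, after freeing whatever was already allocated.
 */
static struct sketch_vec *copy_behind_vecs(const struct sketch_vec *src, int cnt)
{
	struct sketch_vec *dst = calloc(cnt, sizeof(*dst));
	int i;

	if (!dst)
		return NULL;
	for (i = 0; i < cnt; i++) {
		dst[i] = src[i];                        /* keep offset and len */
		dst[i].page = malloc(SKETCH_PAGE_SIZE); /* private copy of the page */
		if (!dst[i].page)
			goto fail;
		memcpy(dst[i].page + dst[i].offset,
		       src[i].page + src[i].offset, src[i].len);
	}
	return dst;
fail:
	while (i--)
		free(dst[i].page);
	free(dst);
	return NULL;
}

int main(void)
{
	unsigned char data[SKETCH_PAGE_SIZE] = "write-behind payload";
	struct sketch_vec src = { .page = data, .offset = 0, .len = 21 };
	struct sketch_vec *copy = copy_behind_vecs(&src, 1);

	if (!copy)
		return 1;
	printf("copied segment: \"%s\" (offset %u, len %u)\n",
	       (char *)(copy[0].page + copy[0].offset),
	       copy[0].offset, copy[0].len);
	free(copy[0].page);
	free(copy);
	return 0;
}
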
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f7431b6d8447df5979c0b80a5ded841f61a16cc1..b16d2ee5e9dd72d470fe12268ff1778655289b10 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
 #include <linux/delay.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
+#include <linux/ratelimit.h>
 #include "md.h"
 #include "raid1.h"
 #include "bitmap.h"
 
 #define DEBUG 0
-#if DEBUG
-#define PRINTK(x...) printk(x)
-#else
-#define PRINTK(x...)
-#endif
+#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)
 
 /*
  * Number of guaranteed r1bios in case of extreme VM load:
@@ -166,7 +163,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 
        for (i = 0; i < conf->raid_disks; i++) {
                struct bio **bio = r1_bio->bios + i;
-               if (*bio && *bio != IO_BLOCKED)
+               if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
@@ -176,12 +173,6 @@ static void free_r1bio(r1bio_t *r1_bio)
 {
        conf_t *conf = r1_bio->mddev->private;
 
-       /*
-        * Wake up any possible resync thread that waits for the device
-        * to go idle.
-        */
-       allow_barrier(conf);
-
        put_all_bios(conf, r1_bio);
        mempool_free(r1_bio, conf->r1bio_pool);
 }
@@ -222,6 +213,33 @@ static void reschedule_retry(r1bio_t *r1_bio)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
+static void call_bio_endio(r1bio_t *r1_bio)
+{
+       struct bio *bio = r1_bio->master_bio;
+       int done;
+       conf_t *conf = r1_bio->mddev->private;
+
+       if (bio->bi_phys_segments) {
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               bio->bi_phys_segments--;
+               done = (bio->bi_phys_segments == 0);
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+       } else
+               done = 1;
+
+       if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       if (done) {
+               bio_endio(bio, 0);
+               /*
+                * Wake up any possible resync thread that waits for the device
+                * to go idle.
+                */
+               allow_barrier(conf);
+       }
+}
+
 static void raid_end_bio_io(r1bio_t *r1_bio)
 {
        struct bio *bio = r1_bio->master_bio;
@@ -234,8 +252,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
                        (unsigned long long) bio->bi_sector +
                                (bio->bi_size >> 9) - 1);
 
-               bio_endio(bio,
-                       test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
+               call_bio_endio(r1_bio);
        }
        free_r1bio(r1_bio);
 }
@@ -287,10 +304,14 @@ static void raid1_end_read_request(struct bio *bio, int error)
                 * oops, read error:
                 */
                char b[BDEVNAME_SIZE];
-               if (printk_ratelimit())
-                       printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
-                              mdname(conf->mddev),
-                              bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
+               printk_ratelimited(
+                       KERN_ERR "md/raid1:%s: %s: "
+                       "rescheduling sector %llu\n",
+                       mdname(conf->mddev),
+                       bdevname(conf->mirrors[mirror].rdev->bdev,
+                                b),
+                       (unsigned long long)r1_bio->sector);
+               set_bit(R1BIO_ReadError, &r1_bio->state);
                reschedule_retry(r1_bio);
        }
 
@@ -306,9 +327,9 @@ static void r1_bio_write_done(r1bio_t *r1_bio)
                        /* free extra copy of the data pages */
                        int i = r1_bio->behind_page_count;
                        while (i--)
-                               safe_put_page(r1_bio->behind_pages[i]);
-                       kfree(r1_bio->behind_pages);
-                       r1_bio->behind_pages = NULL;
+                               safe_put_page(r1_bio->behind_bvecs[i].bv_page);
+                       kfree(r1_bio->behind_bvecs);
+                       r1_bio->behind_bvecs = NULL;
                }
                /* clear the bitmap if all writes complete successfully */
                bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -316,7 +337,10 @@ static void r1_bio_write_done(r1bio_t *r1_bio)
                                !test_bit(R1BIO_Degraded, &r1_bio->state),
                                test_bit(R1BIO_BehindIO, &r1_bio->state));
                md_write_end(r1_bio->mddev);
-               raid_end_bio_io(r1_bio);
+               if (test_bit(R1BIO_MadeGood, &r1_bio->state))
+                       reschedule_retry(r1_bio);
+               else
+                       raid_end_bio_io(r1_bio);
        }
 }
 
@@ -342,7 +366,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
                md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
                /* an I/O failed, we can't clear the bitmap */
                set_bit(R1BIO_Degraded, &r1_bio->state);
-       } else
+       } else {
                /*
                 * Set R1BIO_Uptodate in our master bio, so that we
                 * will return a good error code to the higher
@@ -353,8 +377,20 @@ static void raid1_end_write_request(struct bio *bio, int error)
                 * to user-side. So if something waits for IO, then it
                 * will wait for the 'master' bio.
                 */
+               sector_t first_bad;
+               int bad_sectors;
+
                set_bit(R1BIO_Uptodate, &r1_bio->state);
 
+               /* Maybe we can clear some bad blocks. */
+               if (is_badblock(conf->mirrors[mirror].rdev,
+                               r1_bio->sector, r1_bio->sectors,
+                               &first_bad, &bad_sectors)) {
+                       r1_bio->bios[mirror] = IO_MADE_GOOD;
+                       set_bit(R1BIO_MadeGood, &r1_bio->state);
+               }
+       }
+
        update_head_pos(mirror, r1_bio);
 
        if (behind) {
@@ -377,11 +413,13 @@ static void raid1_end_write_request(struct bio *bio, int error)
                                       (unsigned long long) mbio->bi_sector,
                                       (unsigned long long) mbio->bi_sector +
                                       (mbio->bi_size >> 9) - 1);
-                               bio_endio(mbio, 0);
+                               call_bio_endio(r1_bio);
                        }
                }
        }
-       rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+       if (r1_bio->bios[mirror] == NULL)
+               rdev_dec_pending(conf->mirrors[mirror].rdev,
+                                conf->mddev);
 
        /*
         * Let's see if all mirrored write operations have finished
@@ -408,10 +446,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
  *
  * The rdev for the device selected will have nr_pending incremented.
  */
-static int read_balance(conf_t *conf, r1bio_t *r1_bio)
+static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
 {
        const sector_t this_sector = r1_bio->sector;
-       const int sectors = r1_bio->sectors;
+       int sectors;
+       int best_good_sectors;
        int start_disk;
        int best_disk;
        int i;
@@ -426,8 +465,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
         * We take the first readable disk when above the resync window.
         */
  retry:
+       sectors = r1_bio->sectors;
        best_disk = -1;
        best_dist = MaxSector;
+       best_good_sectors = 0;
+
        if (conf->mddev->recovery_cp < MaxSector &&
            (this_sector + sectors >= conf->next_resync)) {
                choose_first = 1;
@@ -439,6 +481,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 
        for (i = 0 ; i < conf->raid_disks ; i++) {
                sector_t dist;
+               sector_t first_bad;
+               int bad_sectors;
+
                int disk = start_disk + i;
                if (disk >= conf->raid_disks)
                        disk -= conf->raid_disks;
@@ -461,6 +506,35 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
                /* This is a reasonable device to use.  It might
                 * even be best.
                 */
+               if (is_badblock(rdev, this_sector, sectors,
+                               &first_bad, &bad_sectors)) {
+                       if (best_dist < MaxSector)
+                               /* already have a better device */
+                               continue;
+                       if (first_bad <= this_sector) {
+                               /* cannot read here. If this is the 'primary'
+                                * device, then we must not read beyond
+                                * bad_sectors from another device..
+                                */
+                               bad_sectors -= (this_sector - first_bad);
+                               if (choose_first && sectors > bad_sectors)
+                                       sectors = bad_sectors;
+                               if (best_good_sectors > sectors)
+                                       best_good_sectors = sectors;
+
+                       } else {
+                               sector_t good_sectors = first_bad - this_sector;
+                               if (good_sectors > best_good_sectors) {
+                                       best_good_sectors = good_sectors;
+                                       best_disk = disk;
+                               }
+                               if (choose_first)
+                                       break;
+                       }
+                       continue;
+               } else
+                       best_good_sectors = sectors;
+
                dist = abs(this_sector - conf->mirrors[disk].head_position);
                if (choose_first
                    /* Don't change to another disk for sequential reads */
@@ -489,10 +563,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
                        rdev_dec_pending(rdev, conf->mddev);
                        goto retry;
                }
+               sectors = best_good_sectors;
                conf->next_seq_sect = this_sector + sectors;
                conf->last_used = best_disk;
        }
        rcu_read_unlock();
+       *max_sectors = sectors;
 
        return best_disk;
 }
@@ -672,30 +748,31 @@ static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
 {
        int i;
        struct bio_vec *bvec;
-       struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
+       struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
                                        GFP_NOIO);
-       if (unlikely(!pages))
+       if (unlikely(!bvecs))
                return;
 
        bio_for_each_segment(bvec, bio, i) {
-               pages[i] = alloc_page(GFP_NOIO);
-               if (unlikely(!pages[i]))
+               bvecs[i] = *bvec;
+               bvecs[i].bv_page = alloc_page(GFP_NOIO);
+               if (unlikely(!bvecs[i].bv_page))
                        goto do_sync_io;
-               memcpy(kmap(pages[i]) + bvec->bv_offset,
-                       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
-               kunmap(pages[i]);
+               memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
+                      kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
+               kunmap(bvecs[i].bv_page);
                kunmap(bvec->bv_page);
        }
-       r1_bio->behind_pages = pages;
+       r1_bio->behind_bvecs = bvecs;
        r1_bio->behind_page_count = bio->bi_vcnt;
        set_bit(R1BIO_BehindIO, &r1_bio->state);
        return;
 
 do_sync_io:
        for (i = 0; i < bio->bi_vcnt; i++)
-               if (pages[i])
-                       put_page(pages[i]);
-       kfree(pages);
+               if (bvecs[i].bv_page)
+                       put_page(bvecs[i].bv_page);
+       kfree(bvecs);
        PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
@@ -705,7 +782,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        mirror_info_t *mirror;
        r1bio_t *r1_bio;
        struct bio *read_bio;
-       int i, targets = 0, disks;
+       int i, disks;
        struct bitmap *bitmap;
        unsigned long flags;
        const int rw = bio_data_dir(bio);
@@ -713,6 +790,9 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        mdk_rdev_t *blocked_rdev;
        int plugged;
+       int first_clone;
+       int sectors_handled;
+       int max_sectors;
 
        /*
         * Register the new request and wait if the reconstruction
@@ -759,11 +839,24 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;
 
+       /* We might need to issue multiple reads to different
+        * devices if there are bad blocks around, so we keep
+        * track of the number of reads in bio->bi_phys_segments.
+        * If this is 0, there is only one r1_bio and no locking
+        * will be needed when requests complete.  If it is
+        * non-zero, then it is the number of not-completed requests.
+        */
+       bio->bi_phys_segments = 0;
+       clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
        if (rw == READ) {
                /*
                 * read balancing logic:
                 */
-               int rdisk = read_balance(conf, r1_bio);
+               int rdisk;
+
+read_again:
+               rdisk = read_balance(conf, r1_bio, &max_sectors);
 
                if (rdisk < 0) {
                        /* couldn't find anywhere to read from */
@@ -784,6 +877,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                r1_bio->read_disk = rdisk;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
+                           max_sectors);
 
                r1_bio->bios[rdisk] = read_bio;
 
@@ -793,16 +888,52 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r1_bio;
 
-               generic_make_request(read_bio);
+               if (max_sectors < r1_bio->sectors) {
+                       /* could not read all from this device, so we will
+                        * need another r1_bio.
+                        */
+
+                       sectors_handled = (r1_bio->sector + max_sectors
+                                          - bio->bi_sector);
+                       r1_bio->sectors = max_sectors;
+                       spin_lock_irq(&conf->device_lock);
+                       if (bio->bi_phys_segments == 0)
+                               bio->bi_phys_segments = 2;
+                       else
+                               bio->bi_phys_segments++;
+                       spin_unlock_irq(&conf->device_lock);
+                       /* Cannot call generic_make_request directly
+                        * as that will be queued in __make_request
+                        * and subsequent mempool_alloc might block waiting
+                        * for it.  So hand bio over to raid1d.
+                        */
+                       reschedule_retry(r1_bio);
+
+                       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+                       r1_bio->master_bio = bio;
+                       r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+                       r1_bio->state = 0;
+                       r1_bio->mddev = mddev;
+                       r1_bio->sector = bio->bi_sector + sectors_handled;
+                       goto read_again;
+               } else
+                       generic_make_request(read_bio);
                return 0;
        }
 
        /*
         * WRITE:
         */
-       /* first select target devices under spinlock and
+       /* first select target devices under rcu_lock and
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
+        * If there are known/acknowledged bad blocks on any device on
+        * which we have seen a write error, we want to avoid writing those
+        * blocks.
+        * This potentially requires several writes to write around
+        * the bad blocks.  Each set of writes gets its own r1bio
+        * with a set of bios attached.
         */
        plugged = mddev_check_plugged(mddev);
 
@@ -810,6 +941,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
  retry_write:
        blocked_rdev = NULL;
        rcu_read_lock();
+       max_sectors = r1_bio->sectors;
        for (i = 0;  i < disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
@@ -817,17 +949,56 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        blocked_rdev = rdev;
                        break;
                }
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       atomic_inc(&rdev->nr_pending);
-                       if (test_bit(Faulty, &rdev->flags)) {
+               r1_bio->bios[i] = NULL;
+               if (!rdev || test_bit(Faulty, &rdev->flags)) {
+                       set_bit(R1BIO_Degraded, &r1_bio->state);
+                       continue;
+               }
+
+               atomic_inc(&rdev->nr_pending);
+               if (test_bit(WriteErrorSeen, &rdev->flags)) {
+                       sector_t first_bad;
+                       int bad_sectors;
+                       int is_bad;
+
+                       is_bad = is_badblock(rdev, r1_bio->sector,
+                                            max_sectors,
+                                            &first_bad, &bad_sectors);
+                       if (is_bad < 0) {
+                               /* mustn't write here until the bad block is
+                                * acknowledged*/
+                               set_bit(BlockedBadBlocks, &rdev->flags);
+                               blocked_rdev = rdev;
+                               break;
+                       }
+                       if (is_bad && first_bad <= r1_bio->sector) {
+                               /* Cannot write here at all */
+                               bad_sectors -= (r1_bio->sector - first_bad);
+                               if (bad_sectors < max_sectors)
+                                       /* mustn't write more than bad_sectors
+                                        * to other devices yet
+                                        */
+                                       max_sectors = bad_sectors;
                                rdev_dec_pending(rdev, mddev);
-                               r1_bio->bios[i] = NULL;
-                       } else {
-                               r1_bio->bios[i] = bio;
-                               targets++;
+                               /* We don't set R1BIO_Degraded as that
+                                * only applies if the disk is
+                                * missing, so it might be re-added,
+                                * and we want to know to recover this
+                                * chunk.
+                                * In this case the device is here,
+                                * and the fact that this chunk is not
+                                * in-sync is recorded in the bad
+                                * block log
+                                */
+                               continue;
                        }
-               } else
-                       r1_bio->bios[i] = NULL;
+                       if (is_bad) {
+                               int good_sectors = first_bad - r1_bio->sector;
+                               if (good_sectors < max_sectors)
+                                       max_sectors = good_sectors;
+                       }
+               }
+               r1_bio->bios[i] = bio;
        }
        rcu_read_unlock();
 
@@ -838,51 +1009,57 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                for (j = 0; j < i; j++)
                        if (r1_bio->bios[j])
                                rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-
+               r1_bio->state = 0;
                allow_barrier(conf);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                wait_barrier(conf);
                goto retry_write;
        }
 
-       BUG_ON(targets == 0); /* we never fail the last device */
-
-       if (targets < conf->raid_disks) {
-               /* array is degraded, we will not clear the bitmap
-                * on I/O completion (see raid1_end_write_request) */
-               set_bit(R1BIO_Degraded, &r1_bio->state);
+       if (max_sectors < r1_bio->sectors) {
+               /* We are splitting this write into multiple parts, so
+                * we need to prepare for allocating another r1_bio.
+                */
+               r1_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
        }
-
-       /* do behind I/O ?
-        * Not if there are too many, or cannot allocate memory,
-        * or a reader on WriteMostly is waiting for behind writes 
-        * to flush */
-       if (bitmap &&
-           (atomic_read(&bitmap->behind_writes)
-            < mddev->bitmap_info.max_write_behind) &&
-           !waitqueue_active(&bitmap->behind_wait))
-               alloc_behind_pages(bio, r1_bio);
+       sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
 
        atomic_set(&r1_bio->remaining, 1);
        atomic_set(&r1_bio->behind_remaining, 0);
 
-       bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
-                               test_bit(R1BIO_BehindIO, &r1_bio->state));
+       first_clone = 1;
        for (i = 0; i < disks; i++) {
                struct bio *mbio;
                if (!r1_bio->bios[i])
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               r1_bio->bios[i] = mbio;
-
-               mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
-               mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
-               mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | do_flush_fua | do_sync;
-               mbio->bi_private = r1_bio;
-
-               if (r1_bio->behind_pages) {
+               md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+
+               if (first_clone) {
+                       /* do behind I/O ?
+                        * Not if there are too many, or cannot
+                        * allocate memory, or a reader on WriteMostly
+                        * is waiting for behind writes to flush */
+                       if (bitmap &&
+                           (atomic_read(&bitmap->behind_writes)
+                            < mddev->bitmap_info.max_write_behind) &&
+                           !waitqueue_active(&bitmap->behind_wait))
+                               alloc_behind_pages(mbio, r1_bio);
+
+                       bitmap_startwrite(bitmap, r1_bio->sector,
+                                         r1_bio->sectors,
+                                         test_bit(R1BIO_BehindIO,
+                                                  &r1_bio->state));
+                       first_clone = 0;
+               }
+               if (r1_bio->behind_bvecs) {
                        struct bio_vec *bvec;
                        int j;
 
@@ -894,11 +1071,20 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                         * them all
                         */
                        __bio_for_each_segment(bvec, mbio, j, 0)
-                               bvec->bv_page = r1_bio->behind_pages[j];
+                               bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
                        if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
                                atomic_inc(&r1_bio->behind_remaining);
                }
 
+               r1_bio->bios[i] = mbio;
+
+               mbio->bi_sector = (r1_bio->sector +
+                                  conf->mirrors[i].rdev->data_offset);
+               mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+               mbio->bi_end_io = raid1_end_write_request;
+               mbio->bi_rw = WRITE | do_flush_fua | do_sync;
+               mbio->bi_private = r1_bio;
+
                atomic_inc(&r1_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
@@ -909,6 +1095,19 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
+       if (sectors_handled < (bio->bi_size >> 9)) {
+               /* We need another r1_bio.  It has already been counted
+                * in bio->bi_phys_segments
+                */
+               r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+               r1_bio->master_bio = bio;
+               r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+               r1_bio->state = 0;
+               r1_bio->mddev = mddev;
+               r1_bio->sector = bio->bi_sector + sectors_handled;
+               goto retry_write;
+       }
+
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
@@ -952,9 +1151,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
                 * However don't try a recovery from this drive as
                 * it is very likely to fail.
                 */
-               mddev->recovery_disabled = 1;
+               conf->recovery_disabled = mddev->recovery_disabled;
                return;
        }
+       set_bit(Blocked, &rdev->flags);
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
@@ -1027,7 +1227,7 @@ static int raid1_spare_active(mddev_t *mddev)
                    && !test_bit(Faulty, &rdev->flags)
                    && !test_and_set_bit(In_sync, &rdev->flags)) {
                        count++;
-                       sysfs_notify_dirent(rdev->sysfs_state);
+                       sysfs_notify_dirent_safe(rdev->sysfs_state);
                }
        }
        spin_lock_irqsave(&conf->device_lock, flags);
@@ -1048,6 +1248,9 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
        int first = 0;
        int last = mddev->raid_disks - 1;
 
+       if (mddev->recovery_disabled == conf->recovery_disabled)
+               return -EBUSY;
+
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
@@ -1103,7 +1306,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
                 * is not possible.
                 */
                if (!test_bit(Faulty, &rdev->flags) &&
-                   !mddev->recovery_disabled &&
+                   mddev->recovery_disabled != conf->recovery_disabled &&
                    mddev->degraded < conf->raid_disks) {
                        err = -EBUSY;
                        goto abort;
@@ -1155,6 +1358,8 @@ static void end_sync_write(struct bio *bio, int error)
        conf_t *conf = mddev->private;
        int i;
        int mirror=0;
+       sector_t first_bad;
+       int bad_sectors;
 
        for (i = 0; i < conf->raid_disks; i++)
                if (r1_bio->bios[i] == bio) {
@@ -1173,14 +1378,22 @@ static void end_sync_write(struct bio *bio, int error)
                        sectors_to_go -= sync_blocks;
                } while (sectors_to_go > 0);
                md_error(mddev, conf->mirrors[mirror].rdev);
-       }
+       } else if (is_badblock(conf->mirrors[mirror].rdev,
+                              r1_bio->sector,
+                              r1_bio->sectors,
+                              &first_bad, &bad_sectors))
+               set_bit(R1BIO_MadeGood, &r1_bio->state);
 
        update_head_pos(mirror, r1_bio);
 
        if (atomic_dec_and_test(&r1_bio->remaining)) {
-               sector_t s = r1_bio->sectors;
-               put_buf(r1_bio);
-               md_done_sync(mddev, s, uptodate);
+               int s = r1_bio->sectors;
+               if (test_bit(R1BIO_MadeGood, &r1_bio->state))
+                       reschedule_retry(r1_bio);
+               else {
+                       put_buf(r1_bio);
+                       md_done_sync(mddev, s, uptodate);
+               }
        }
 }
 
@@ -1193,6 +1406,9 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
         * We don't need to freeze the array, because being in an
         * active sync request, there is no normal IO, and
         * no overlapping syncs.
+        * We don't need to check is_badblock() again as we
+        * made sure that anything with a bad block in range
+        * will have bi_end_io clear.
         */
        mddev_t *mddev = r1_bio->mddev;
        conf_t *conf = mddev->private;
@@ -1217,9 +1433,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
                                 * active, and resync is currently active
                                 */
                                rdev = conf->mirrors[d].rdev;
-                               if (sync_page_io(rdev,
-                                                sect,
-                                                s<<9,
+                               if (sync_page_io(rdev, sect, s<<9,
                                                 bio->bi_io_vec[idx].bv_page,
                                                 READ, false)) {
                                        success = 1;
@@ -1254,16 +1468,13 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
                        if (r1_bio->bios[d]->bi_end_io != end_sync_read)
                                continue;
                        rdev = conf->mirrors[d].rdev;
-                       if (sync_page_io(rdev,
-                                        sect,
-                                        s<<9,
+                       if (sync_page_io(rdev, sect, s<<9,
                                         bio->bi_io_vec[idx].bv_page,
                                         WRITE, false) == 0) {
                                r1_bio->bios[d]->bi_end_io = NULL;
                                rdev_dec_pending(rdev, mddev);
                                md_error(mddev, rdev);
-                       } else
-                               atomic_add(s, &rdev->corrected_errors);
+                       }
                }
                d = start;
                while (d != r1_bio->read_disk) {
@@ -1273,12 +1484,12 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
                        if (r1_bio->bios[d]->bi_end_io != end_sync_read)
                                continue;
                        rdev = conf->mirrors[d].rdev;
-                       if (sync_page_io(rdev,
-                                        sect,
-                                        s<<9,
+                       if (sync_page_io(rdev, sect, s<<9,
                                         bio->bi_io_vec[idx].bv_page,
                                         READ, false) == 0)
                                md_error(mddev, rdev);
+                       else
+                               atomic_add(s, &rdev->corrected_errors);
                }
                sectors -= s;
                sect += s;
@@ -1420,7 +1631,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
  *
  *     1.      Retries failed read operations on working mirrors.
  *     2.      Updates the raid superblock when problems encounter.
- *     3.      Performs writes following reads for array syncronising.
+ *     3.      Performs writes following reads for array synchronising.
  */
 
 static void fix_read_error(conf_t *conf, int read_disk,
@@ -1443,9 +1654,14 @@ static void fix_read_error(conf_t *conf, int read_disk,
                         * which is the thread that might remove
                         * a device.  If raid1d ever becomes multi-threaded....
                         */
+                       sector_t first_bad;
+                       int bad_sectors;
+
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
                            test_bit(In_sync, &rdev->flags) &&
+                           is_badblock(rdev, sect, s,
+                                       &first_bad, &bad_sectors) == 0 &&
                            sync_page_io(rdev, sect, s<<9,
                                         conf->tmppage, READ, false))
                                success = 1;
@@ -1539,11 +1755,43 @@ static void raid1d(mddev_t *mddev)
 
                mddev = r1_bio->mddev;
                conf = mddev->private;
-               if (test_bit(R1BIO_IsSync, &r1_bio->state))
-                       sync_request_write(mddev, r1_bio);
-               else {
+               if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+                       if (test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+                               int m;
+                               int s = r1_bio->sectors;
+                               for (m = 0; m < conf->raid_disks ; m++) {
+                                       struct bio *bio = r1_bio->bios[m];
+                                       if (bio->bi_end_io != NULL &&
+                                           test_bit(BIO_UPTODATE,
+                                                    &bio->bi_flags)) {
+                                               rdev = conf->mirrors[m].rdev;
+                                               rdev_clear_badblocks(
+                                                       rdev,
+                                                       r1_bio->sector,
+                                                       r1_bio->sectors);
+                                       }
+                               }
+                               put_buf(r1_bio);
+                               md_done_sync(mddev, s, 1);
+                       } else
+                               sync_request_write(mddev, r1_bio);
+               } else if (test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+                       int m;
+                       for (m = 0; m < conf->raid_disks ; m++)
+                               if (r1_bio->bios[m] == IO_MADE_GOOD) {
+                                       rdev = conf->mirrors[m].rdev;
+                                       rdev_clear_badblocks(
+                                               rdev,
+                                               r1_bio->sector,
+                                               r1_bio->sectors);
+                                       rdev_dec_pending(rdev, mddev);
+                               }
+                       raid_end_bio_io(r1_bio);
+               } else if (test_bit(R1BIO_ReadError, &r1_bio->state)) {
                        int disk;
+                       int max_sectors;
 
+                       clear_bit(R1BIO_ReadError, &r1_bio->state);
                        /* we got a read error. Maybe the drive is bad.  Maybe just
                         * the block and we can fix it.
                         * We freeze all other IO, and try reading the block from
@@ -1563,38 +1811,84 @@ static void raid1d(mddev_t *mddev)
                                         conf->mirrors[r1_bio->read_disk].rdev);
 
                        bio = r1_bio->bios[r1_bio->read_disk];
-                       if ((disk=read_balance(conf, r1_bio)) == -1) {
+                       bdevname(bio->bi_bdev, b);
+read_more:
+                       disk = read_balance(conf, r1_bio, &max_sectors);
+                       if (disk == -1) {
                                printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
                                       " read error for block %llu\n",
-                                      mdname(mddev),
-                                      bdevname(bio->bi_bdev,b),
+                                      mdname(mddev), b,
                                       (unsigned long long)r1_bio->sector);
                                raid_end_bio_io(r1_bio);
                        } else {
                                const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
-                               r1_bio->bios[r1_bio->read_disk] =
-                                       mddev->ro ? IO_BLOCKED : NULL;
+                               if (bio) {
+                                       r1_bio->bios[r1_bio->read_disk] =
+                                               mddev->ro ? IO_BLOCKED : NULL;
+                                       bio_put(bio);
+                               }
                                r1_bio->read_disk = disk;
-                               bio_put(bio);
                                bio = bio_clone_mddev(r1_bio->master_bio,
                                                      GFP_NOIO, mddev);
+                               md_trim_bio(bio,
+                                           r1_bio->sector - bio->bi_sector,
+                                           max_sectors);
                                r1_bio->bios[r1_bio->read_disk] = bio;
                                rdev = conf->mirrors[disk].rdev;
-                               if (printk_ratelimit())
-                                       printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
-                                              " other mirror: %s\n",
-                                              mdname(mddev),
-                                              (unsigned long long)r1_bio->sector,
-                                              bdevname(rdev->bdev,b));
+                               printk_ratelimited(
+                                       KERN_ERR
+                                       "md/raid1:%s: redirecting sector %llu"
+                                       " to other mirror: %s\n",
+                                       mdname(mddev),
+                                       (unsigned long long)r1_bio->sector,
+                                       bdevname(rdev->bdev, b));
                                bio->bi_sector = r1_bio->sector + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                bio->bi_end_io = raid1_end_read_request;
                                bio->bi_rw = READ | do_sync;
                                bio->bi_private = r1_bio;
-                               generic_make_request(bio);
+                               if (max_sectors < r1_bio->sectors) {
+                                       /* Drat - have to split this up more */
+                                       struct bio *mbio = r1_bio->master_bio;
+                                       int sectors_handled =
+                                               r1_bio->sector + max_sectors
+                                               - mbio->bi_sector;
+                                       r1_bio->sectors = max_sectors;
+                                       spin_lock_irq(&conf->device_lock);
+                                       if (mbio->bi_phys_segments == 0)
+                                               mbio->bi_phys_segments = 2;
+                                       else
+                                               mbio->bi_phys_segments++;
+                                       spin_unlock_irq(&conf->device_lock);
+                                       generic_make_request(bio);
+                                       bio = NULL;
+
+                                       r1_bio = mempool_alloc(conf->r1bio_pool,
+                                                              GFP_NOIO);
+
+                                       r1_bio->master_bio = mbio;
+                                       r1_bio->sectors = (mbio->bi_size >> 9)
+                                               - sectors_handled;
+                                       r1_bio->state = 0;
+                                       set_bit(R1BIO_ReadError,
+                                               &r1_bio->state);
+                                       r1_bio->mddev = mddev;
+                                       r1_bio->sector = mbio->bi_sector
+                                               + sectors_handled;
+
+                                       goto read_more;
+                               } else
+                                       generic_make_request(bio);
                        }
+               } else {
+                       /* just a partial read to be scheduled from separate
+                        * context
+                        */
+                       generic_make_request(r1_bio->bios[r1_bio->read_disk]);
                }
                cond_resched();
+               if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+                       md_check_recovery(mddev);
        }
        blk_finish_plug(&plug);
 }
@@ -1636,6 +1930,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        int write_targets = 0, read_targets = 0;
        sector_t sync_blocks;
        int still_degraded = 0;
+       int good_sectors = RESYNC_SECTORS;
+       int min_bad = 0; /* number of sectors that are bad in all devices */
 
        if (!conf->r1buf_pool)
                if (init_resync(conf))
@@ -1723,36 +2019,89 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
                rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev == NULL ||
-                          test_bit(Faulty, &rdev->flags)) {
+                   test_bit(Faulty, &rdev->flags)) {
                        still_degraded = 1;
-                       continue;
                } else if (!test_bit(In_sync, &rdev->flags)) {
                        bio->bi_rw = WRITE;
                        bio->bi_end_io = end_sync_write;
                        write_targets ++;
                } else {
                        /* may need to read from here */
-                       bio->bi_rw = READ;
-                       bio->bi_end_io = end_sync_read;
-                       if (test_bit(WriteMostly, &rdev->flags)) {
-                               if (wonly < 0)
-                                       wonly = i;
-                       } else {
-                               if (disk < 0)
-                                       disk = i;
+                       sector_t first_bad = MaxSector;
+                       int bad_sectors;
+
+                       if (is_badblock(rdev, sector_nr, good_sectors,
+                                       &first_bad, &bad_sectors)) {
+                               if (first_bad > sector_nr)
+                                       good_sectors = first_bad - sector_nr;
+                               else {
+                                       bad_sectors -= (sector_nr - first_bad);
+                                       if (min_bad == 0 ||
+                                           min_bad > bad_sectors)
+                                               min_bad = bad_sectors;
+                               }
+                       }
+                       if (sector_nr < first_bad) {
+                               if (test_bit(WriteMostly, &rdev->flags)) {
+                                       if (wonly < 0)
+                                               wonly = i;
+                               } else {
+                                       if (disk < 0)
+                                               disk = i;
+                               }
+                               bio->bi_rw = READ;
+                               bio->bi_end_io = end_sync_read;
+                               read_targets++;
                        }
-                       read_targets++;
                }
-               atomic_inc(&rdev->nr_pending);
-               bio->bi_sector = sector_nr + rdev->data_offset;
-               bio->bi_bdev = rdev->bdev;
-               bio->bi_private = r1_bio;
+               if (bio->bi_end_io) {
+                       atomic_inc(&rdev->nr_pending);
+                       bio->bi_sector = sector_nr + rdev->data_offset;
+                       bio->bi_bdev = rdev->bdev;
+                       bio->bi_private = r1_bio;
+               }
        }
        rcu_read_unlock();
        if (disk < 0)
                disk = wonly;
        r1_bio->read_disk = disk;
 
+       if (read_targets == 0 && min_bad > 0) {
+               /* These sectors are bad on all InSync devices, so we
+                * need to mark them bad on all write targets
+                */
+               int ok = 1;
+               for (i = 0 ; i < conf->raid_disks ; i++)
+                       if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
+                               mdk_rdev_t *rdev =
+                                       rcu_dereference(conf->mirrors[i].rdev);
+                               ok = rdev_set_badblocks(rdev, sector_nr,
+                                                       min_bad, 0
+                                       ) && ok;
+                       }
+               set_bit(MD_CHANGE_DEVS, &mddev->flags);
+               *skipped = 1;
+               put_buf(r1_bio);
+
+               if (!ok) {
+                       /* Cannot record the badblocks, so need to
+                        * abort the resync.
+                        * If there are multiple read targets, could just
+                        * fail the really bad ones ???
+                        */
+                       conf->recovery_disabled = mddev->recovery_disabled;
+                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+                       return 0;
+               } else
+                       return min_bad;
+
+       }
+       if (min_bad > 0 && min_bad < good_sectors) {
+               /* only resync enough to reach the next bad->good
+                * transition */
+               good_sectors = min_bad;
+       }
+
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
                /* extra read targets are also write targets */
                write_targets += read_targets-1;
@@ -1769,6 +2118,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
        if (max_sector > mddev->resync_max)
                max_sector = mddev->resync_max; /* Don't do IO beyond here */
+       if (max_sector > sector_nr + good_sectors)
+               max_sector = sector_nr + good_sectors;
        nr_sectors = 0;
        sync_blocks = 0;
        do {
@@ -2154,18 +2505,13 @@ static int raid1_reshape(mddev_t *mddev)
        for (d = d2 = 0; d < conf->raid_disks; d++) {
                mdk_rdev_t *rdev = conf->mirrors[d].rdev;
                if (rdev && rdev->raid_disk != d2) {
-                       char nm[20];
-                       sprintf(nm, "rd%d", rdev->raid_disk);
-                       sysfs_remove_link(&mddev->kobj, nm);
+                       sysfs_unlink_rdev(mddev, rdev);
                        rdev->raid_disk = d2;
-                       sprintf(nm, "rd%d", rdev->raid_disk);
-                       sysfs_remove_link(&mddev->kobj, nm);
-                       if (sysfs_create_link(&mddev->kobj,
-                                             &rdev->kobj, nm))
+                       sysfs_unlink_rdev(mddev, rdev);
+                       if (sysfs_link_rdev(mddev, rdev))
                                printk(KERN_WARNING
-                                      "md/raid1:%s: cannot register "
-                                      "%s\n",
-                                      mdname(mddev), nm);
+                                      "md/raid1:%s: cannot register rd%d\n",
+                                      mdname(mddev), rdev->raid_disk);
                }
                if (rdev)
                        newmirrors[d2++].rdev = rdev;