]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blobdiff - drivers/md/md.c
MD: Update superblock when err == 0 in size_store
[mirror_ubuntu-jammy-kernel.git] / drivers / md / md.c
index c9a475c33cc7401dc067716e6cf3aaf648c46d93..d0c1e79da49ec9be864c5319e0dad5781815bd22 100644 (file)
@@ -307,7 +307,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
  */
 void mddev_suspend(struct mddev *mddev)
 {
-       WARN_ON_ONCE(current == mddev->thread->tsk);
+       WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
        if (mddev->suspended++)
                return;
        synchronize_rcu();
@@ -2291,19 +2291,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
                return;
        }
 
+repeat:
        if (mddev_is_clustered(mddev)) {
                if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
                        force_change = 1;
+               if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+                       nospares = 1;
                ret = md_cluster_ops->metadata_update_start(mddev);
                /* Has someone else updated the sb? */
                if (!does_sb_need_changing(mddev)) {
                        if (ret == 0)
                                md_cluster_ops->metadata_update_cancel(mddev);
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+                       bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
+                                                        BIT(MD_CHANGE_DEVS) |
+                                                        BIT(MD_CHANGE_CLEAN));
                        return;
                }
        }
-repeat:
+
        /* First make sure individual recovery_offsets are correct */
        rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk >= 0 &&
@@ -2430,15 +2435,14 @@ repeat:
        md_super_wait(mddev);
        /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
 
-       spin_lock(&mddev->lock);
+       if (mddev_is_clustered(mddev) && ret == 0)
+               md_cluster_ops->metadata_update_finish(mddev);
+
        if (mddev->in_sync != sync_req ||
-           test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+           !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
+                              BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
                /* have to write it out again */
-               spin_unlock(&mddev->lock);
                goto repeat;
-       }
-       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-       spin_unlock(&mddev->lock);
        wake_up(&mddev->sb_wait);
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
@@ -2452,9 +2456,6 @@ repeat:
                clear_bit(BlockedBadBlocks, &rdev->flags);
                wake_up(&rdev->blocked_wait);
        }
-
-       if (mddev_is_clustered(mddev) && ret == 0)
-               md_cluster_ops->metadata_update_finish(mddev);
 }
 EXPORT_SYMBOL(md_update_sb);
 
@@ -2477,8 +2478,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
                if (add_journal)
                        mddev_resume(mddev);
                if (err) {
-                       unbind_rdev_from_array(rdev);
-                       export_rdev(rdev);
+                       md_kick_rdev_from_array(rdev);
                        return err;
                }
        }
@@ -4186,7 +4186,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
                return err;
        if (mddev->pers) {
                err = update_size(mddev, sectors);
-               md_update_sb(mddev, 1);
+               if (err == 0)
+                       md_update_sb(mddev, 1);
        } else {
                if (mddev->dev_sectors == 0 ||
                    mddev->dev_sectors > sectors)
@@ -4816,6 +4817,10 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
        if (err)
                return err;
 
+       /* cluster raid doesn't support changing array_sectors */
+       if (mddev_is_clustered(mddev))
+               return -EINVAL;
+
        if (strncmp(buf, "default", 7) == 0) {
                if (mddev->pers)
                        sectors = mddev->pers->size(mddev, 0, 0);
@@ -6437,6 +6442,10 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
        int rv;
        int fit = (num_sectors == 0);
 
+       /* cluster raid doesn't support updating the size */
+       if (mddev_is_clustered(mddev))
+               return -EINVAL;
+
        if (mddev->pers->resize == NULL)
                return -EINVAL;
        /* The "num_sectors" is the number of sectors of each device that
@@ -7785,7 +7794,7 @@ void md_do_sync(struct md_thread *thread)
        struct md_rdev *rdev;
        char *desc, *action = NULL;
        struct blk_plug plug;
-       bool cluster_resync_finished = false;
+       int ret;
 
        /* just in case the thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7795,6 +7804,20 @@ void md_do_sync(struct md_thread *thread)
                return;
        }
 
+       if (mddev_is_clustered(mddev)) {
+               ret = md_cluster_ops->resync_start(mddev);
+               if (ret)
+                       goto skip;
+
+               set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
+               if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+                       test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
+                       test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+                    && ((unsigned long long)mddev->curr_resync_completed
+                        < (unsigned long long)mddev->resync_max_sectors))
+                       goto skip;
+       }
+
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
                        desc = "data-check";
@@ -8089,11 +8112,6 @@ void md_do_sync(struct md_thread *thread)
                mddev->curr_resync_completed = mddev->curr_resync;
                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        }
-       /* tell personality and other nodes that we are finished */
-       if (mddev_is_clustered(mddev)) {
-               md_cluster_ops->resync_finish(mddev);
-               cluster_resync_finished = true;
-       }
        mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
        if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
@@ -8130,12 +8148,11 @@ void md_do_sync(struct md_thread *thread)
                }
        }
  skip:
-       set_bit(MD_CHANGE_DEVS, &mddev->flags);
-
-       if (mddev_is_clustered(mddev) &&
-           test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-           !cluster_resync_finished)
-               md_cluster_ops->resync_finish(mddev);
+       /* Set CHANGE_PENDING here since another update may still be
+        * needed, so the other nodes are informed. It should be harmless
+        * for normal (non-clustered) raid. */
+       set_mask_bits(&mddev->flags, 0,
+                     BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
 
        spin_lock(&mddev->lock);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8226,18 +8243,9 @@ static void md_start_sync(struct work_struct *ws)
        struct mddev *mddev = container_of(ws, struct mddev, del_work);
        int ret = 0;
 
-       if (mddev_is_clustered(mddev)) {
-               ret = md_cluster_ops->resync_start(mddev);
-               if (ret) {
-                       mddev->sync_thread = NULL;
-                       goto out;
-               }
-       }
-
        mddev->sync_thread = md_register_thread(md_do_sync,
                                                mddev,
                                                "resync");
-out:
        if (!mddev->sync_thread) {
                if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
                        printk(KERN_ERR "%s: could not start resync"
@@ -8488,6 +8496,11 @@ void md_reap_sync_thread(struct mddev *mddev)
                        rdev->saved_raid_disk = -1;
 
        md_update_sb(mddev, 1);
+       /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+        * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
+        * clustered raid */
+       if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
+               md_cluster_ops->resync_finish(mddev);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -8536,6 +8549,7 @@ EXPORT_SYMBOL(md_finish_reshape);
 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                       int is_new)
 {
+       struct mddev *mddev = rdev->mddev;
        int rv;
        if (is_new)
                s += rdev->new_data_offset;
@@ -8545,8 +8559,8 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
        if (rv == 0) {
                /* Make sure they get written out promptly */
                sysfs_notify_dirent_safe(rdev->sysfs_state);
-               set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
-               set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
+               set_mask_bits(&mddev->flags, 0,
+                             BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
                md_wakeup_thread(rdev->mddev->thread);
                return 1;
        } else
@@ -8680,6 +8694,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
                                ret = remove_and_add_spares(mddev, rdev2);
                                pr_info("Activated spare: %s\n",
                                                bdevname(rdev2->bdev,b));
+                               /* wake up mddev->thread here, so the array
+                                * can resync with the newly activated disk */
+                               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                               md_wakeup_thread(mddev->thread);
+
                        }
                        /* device faulty
                         * We just want to do the minimum to mark the disk
@@ -8779,6 +8798,7 @@ EXPORT_SYMBOL(md_reload_sb);
  * at boot time.
  */
 
+static DEFINE_MUTEX(detected_devices_mutex);
 static LIST_HEAD(all_detected_devices);
 struct detected_devices_node {
        struct list_head list;
@@ -8792,7 +8812,9 @@ void md_autodetect_dev(dev_t dev)
        node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
        if (node_detected_dev) {
                node_detected_dev->dev = dev;
+               mutex_lock(&detected_devices_mutex);
                list_add_tail(&node_detected_dev->list, &all_detected_devices);
+               mutex_unlock(&detected_devices_mutex);
        } else {
                printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
                        ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
@@ -8811,6 +8833,7 @@ static void autostart_arrays(int part)
 
        printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
 
+       mutex_lock(&detected_devices_mutex);
        while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
                i_scanned++;
                node_detected_dev = list_entry(all_detected_devices.next,
@@ -8829,6 +8852,7 @@ static void autostart_arrays(int part)
                list_add(&rdev->same_set, &pending_raid_disks);
                i_passed++;
        }
+       mutex_unlock(&detected_devices_mutex);
 
        printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
                                                i_scanned, i_passed);