X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=module%2Fzfs%2Fspa.c;h=eb3ff91a073c8af8c3365d8beb5ff4154abc8f52;hb=ca95f70dff2915a1a5d838ecafc28646f5f5a42e;hp=a1851bca25abeb34cb4694935e06103db770ecaf;hpb=52ce99dd617369ff09d8eef8cfd36fa80dbfca4f;p=mirror_zfs.git diff --git a/module/zfs/spa.c b/module/zfs/spa.c index a1851bca2..eb3ff91a0 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -21,7 +21,7 @@ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2011, 2017 by Delphix. All rights reserved. + * Copyright (c) 2011, 2019 by Delphix. All rights reserved. * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. * Copyright 2013 Saso Kiselkov. All rights reserved. @@ -56,6 +56,8 @@ #include #include #include +#include +#include #include #include #include @@ -131,7 +133,7 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = { * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE * macros. Other operations process a large amount of data; the ZTI_BATCH * macro causes us to create a taskq oriented for throughput. Some operations - * are so high frequency and short-lived that the taskq itself can become a a + * are so high frequency and short-lived that the taskq itself can become a * point of lock contention. The ZTI_P(#, #) macro indicates that we need an * additional degree of parallelism specified by the number of threads per- * taskq and the number of taskqs; when dispatching an event in this case, the @@ -149,6 +151,7 @@ const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */ + { ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */ }; static void spa_sync_version(void *arg, dmu_tx_t *tx); @@ -434,8 +437,9 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp) dp = spa_get_dsl(spa); dsl_pool_config_enter(dp, FTAG); - if ((err = dsl_dataset_hold_obj(dp, - za.za_first_integer, FTAG, &ds))) { + err = dsl_dataset_hold_obj(dp, + za.za_first_integer, FTAG, &ds); + if (err != 0) { dsl_pool_config_exit(dp, FTAG); break; } @@ -552,6 +556,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) case ZPOOL_PROP_AUTOREPLACE: case ZPOOL_PROP_LISTSNAPS: case ZPOOL_PROP_AUTOEXPAND: + case ZPOOL_PROP_AUTOTRIM: error = nvpair_value_uint64(elem, &intval); if (!error && intval > 1) error = SET_ERROR(EINVAL); @@ -601,14 +606,14 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) } error = dmu_objset_hold(strval, FTAG, &os); - if (error) + if (error != 0) break; /* * Must be ZPL, and its property settings * must be supported by GRUB (compression - * is not gzip, and large blocks or large - * dnodes are not used). + * is not gzip, and large dnodes are not + * used). 
*/ if (dmu_objset_type(os) != DMU_OST_ZFS) { @@ -1218,8 +1223,10 @@ spa_activate(spa_t *spa, int mode) spa_create_zio_taskqs(spa); } - for (size_t i = 0; i < TXG_SIZE; i++) - spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 0); + for (size_t i = 0; i < TXG_SIZE; i++) { + spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, + ZIO_FLAG_CANFAIL); + } list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), offsetof(vdev_t, vdev_config_dirty_node)); @@ -1430,6 +1437,7 @@ spa_unload(spa_t *spa) ASSERT(MUTEX_HELD(&spa_namespace_lock)); + spa_import_progress_remove(spa_guid(spa)); spa_load_note(spa, "UNLOADING"); /* @@ -1437,6 +1445,13 @@ spa_unload(spa_t *spa) */ spa_async_suspend(spa); + if (spa->spa_root_vdev) { + vdev_t *root_vdev = spa->spa_root_vdev; + vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE); + vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE); + vdev_autotrim_stop_all(spa); + } + /* * Stop syncing. */ @@ -1452,10 +1467,10 @@ spa_unload(spa_t *spa) * calling taskq_wait(mg_taskq). */ if (spa->spa_root_vdev != NULL) { - spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]); - spa_config_exit(spa, SCL_ALL, FTAG); + spa_config_exit(spa, SCL_ALL, spa); } if (spa->spa_mmp.mmp_thread) @@ -1477,13 +1492,11 @@ spa_unload(spa_t *spa) } if (spa->spa_condense_zthr != NULL) { - ASSERT(!zthr_isrunning(spa->spa_condense_zthr)); zthr_destroy(spa->spa_condense_zthr); spa->spa_condense_zthr = NULL; } if (spa->spa_checkpoint_discard_zthr != NULL) { - ASSERT(!zthr_isrunning(spa->spa_checkpoint_discard_zthr)); zthr_destroy(spa->spa_checkpoint_discard_zthr); spa->spa_checkpoint_discard_zthr = NULL; } @@ -1492,7 +1505,7 @@ spa_unload(spa_t *spa) bpobj_close(&spa->spa_deferred_bpobj); - spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); /* * Close all vdevs. @@ -1554,7 +1567,7 @@ spa_unload(spa_t *spa) spa->spa_comment = NULL; } - spa_config_exit(spa, SCL_ALL, FTAG); + spa_config_exit(spa, SCL_ALL, spa); } /* @@ -2363,6 +2376,8 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) int error; spa->spa_load_state = state; + (void) spa_import_progress_set_state(spa_guid(spa), + spa_load_state(spa)); gethrestime(&spa->spa_loaded_ts); error = spa_load_impl(spa, type, &ereport); @@ -2385,6 +2400,9 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) spa->spa_load_state = error ? 
SPA_LOAD_ERROR : SPA_LOAD_NONE; spa->spa_ena = 0; + (void) spa_import_progress_set_state(spa_guid(spa), + spa_load_state(spa)); + return (error); } @@ -2430,6 +2448,7 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, uint64_t hostid = 0; uint64_t tryconfig_txg = 0; uint64_t tryconfig_timestamp = 0; + uint16_t tryconfig_mmp_seq = 0; nvlist_t *nvinfo; if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { @@ -2438,6 +2457,8 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, &tryconfig_txg); (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, &tryconfig_timestamp); + (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, + &tryconfig_mmp_seq); } (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); @@ -2454,14 +2475,17 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, */ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) return (B_FALSE); + /* - * If the tryconfig_* values are nonzero, they are the results of an - * earlier tryimport. If they match the uberblock we just found, then - * the pool has not changed and we return false so we do not test a - * second time. + * If the tryconfig_ values are nonzero, they are the results of an + * earlier tryimport. If they all match the uberblock we just found, + * then the pool has not changed and we return false so we do not test + * a second time. */ if (tryconfig_txg && tryconfig_txg == ub->ub_txg && - tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp) + tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && + tryconfig_mmp_seq && tryconfig_mmp_seq == + (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) return (B_FALSE); /* @@ -2484,6 +2508,76 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, return (B_TRUE); } +/* + * Nanoseconds the activity check must watch for changes on-disk. + */ +static uint64_t +spa_activity_check_duration(spa_t *spa, uberblock_t *ub) +{ + uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); + uint64_t multihost_interval = MSEC2NSEC( + MMP_INTERVAL_OK(zfs_multihost_interval)); + uint64_t import_delay = MAX(NANOSEC, import_intervals * + multihost_interval); + + /* + * Local tunables determine a minimum duration except for the case + * where we know when the remote host will suspend the pool if MMP + * writes do not land. + * + * See Big Theory comment at the top of mmp.c for the reasoning behind + * these cases and times. 
+ */ + + ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); + + if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && + MMP_FAIL_INT(ub) > 0) { + + /* MMP on remote host will suspend pool after failed writes */ + import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * + MMP_IMPORT_SAFETY_FACTOR / 100; + + zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " + "mmp_fails=%llu ub_mmp mmp_interval=%llu " + "import_intervals=%u", import_delay, MMP_FAIL_INT(ub), + MMP_INTERVAL(ub), import_intervals); + + } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && + MMP_FAIL_INT(ub) == 0) { + + /* MMP on remote host will never suspend pool */ + import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + + ub->ub_mmp_delay) * import_intervals); + + zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " + "mmp_interval=%llu ub_mmp_delay=%llu " + "import_intervals=%u", import_delay, MMP_INTERVAL(ub), + ub->ub_mmp_delay, import_intervals); + + } else if (MMP_VALID(ub)) { + /* + * zfs-0.7 compatability case + */ + + import_delay = MAX(import_delay, (multihost_interval + + ub->ub_mmp_delay) * import_intervals); + + zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " + "import_intervals=%u leaves=%u", import_delay, + ub->ub_mmp_delay, import_intervals, + vdev_count_leaves(spa)); + } else { + /* Using local tunings is the only reasonable option */ + zfs_dbgmsg("pool last imported on non-MMP aware " + "host using import_delay=%llu multihost_interval=%llu " + "import_intervals=%u", import_delay, multihost_interval, + import_intervals); + } + + return (import_delay); +} + /* * Perform the import activity check. If the user canceled the import or * we detected activity then fail. @@ -2491,10 +2585,11 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, static int spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) { - uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); uint64_t txg = ub->ub_txg; uint64_t timestamp = ub->ub_timestamp; - uint64_t import_delay = NANOSEC; + uint64_t mmp_config = ub->ub_mmp_config; + uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; + uint64_t import_delay; hrtime_t import_expire; nvlist_t *mmp_label = NULL; vdev_t *rvd = spa->spa_root_vdev; @@ -2511,7 +2606,7 @@ spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) * during the earlier tryimport. If the txg recorded there is 0 then * the pool is known to be active on another host. * - * Otherwise, the pool might be in use on another node. Check for + * Otherwise, the pool might be in use on another host. Check for * changes in the uberblocks on disk if necessary. */ if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { @@ -2526,32 +2621,28 @@ spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) } } - /* - * Preferentially use the zfs_multihost_interval from the node which - * last imported the pool. This value is stored in an MMP uberblock as. - * - * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval - */ - if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay) - import_delay = MAX(import_delay, import_intervals * - ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1)); - - /* Apply a floor using the local default values. 
*/ - import_delay = MAX(import_delay, import_intervals * - MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL))); - - zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u " - "leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals, - vdev_count_leaves(spa)); + import_delay = spa_activity_check_duration(spa, ub); /* Add a small random factor in case of simultaneous imports (0-25%) */ - import_expire = gethrtime() + import_delay + - (import_delay * spa_get_random(250) / 1000); + import_delay += import_delay * spa_get_random(250) / 1000; + + import_expire = gethrtime() + import_delay; while (gethrtime() < import_expire) { + (void) spa_import_progress_set_mmp_check(spa_guid(spa), + NSEC2SEC(import_expire - gethrtime())); + vdev_uberblock_load(rvd, ub, &mmp_label); - if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) { + if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || + mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) { + zfs_dbgmsg("multihost activity detected " + "txg %llu ub_txg %llu " + "timestamp %llu ub_timestamp %llu " + "mmp_config %#llx ub_mmp_config %#llx", + txg, ub->ub_txg, timestamp, ub->ub_timestamp, + mmp_config, ub->ub_mmp_config); + error = SET_ERROR(EREMOTEIO); break; } @@ -2907,6 +2998,10 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); } + if (spa->spa_load_max_txg != UINT64_MAX) { + (void) spa_import_progress_set_max_txg(spa_guid(spa), + (u_longlong_t)spa->spa_load_max_txg); + } spa_load_note(spa, "using uberblock with txg=%llu", (u_longlong_t)ub->ub_txg); @@ -2937,6 +3032,9 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); + fnvlist_add_uint16(spa->spa_load_info, + ZPOOL_CONFIG_MMP_SEQ, + (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); } /* @@ -3353,6 +3451,16 @@ spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); } + /* + * Encryption was added before bookmark_v2, even though bookmark_v2 + * is now a dependency. If this pool has encryption enabled without + * bookmark_v2, trigger an errata message. + */ + if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && + !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { + spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; + } + return (0); } @@ -3497,7 +3605,7 @@ spa_ld_get_props(spa_t *spa) spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, &spa->spa_dedup_ditto); - + spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim); spa->spa_autoreplace = (autoreplace != 0); } @@ -3823,6 +3931,8 @@ spa_ld_mos_init(spa_t *spa, spa_import_type_t type) if (error != 0) return (error); + spa_import_progress_add(spa); + /* * Now that we have the vdev tree, try to open each vdev. This involves * opening the underlying physical device, retrieving its geometry and @@ -4226,8 +4336,17 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) */ spa_history_log_version(spa, "open", NULL); + spa_restart_removal(spa); + spa_spawn_aux_threads(spa); + /* * Delete any inconsistent datasets. + * + * Note: + * Since we may be issuing deletes for clones here, + * we make sure to do so after we've spawned all the + * auxiliary threads above (from which the livelist + * deletion zthr is part of). 
*/ (void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); @@ -4237,11 +4356,14 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) */ dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); - spa_restart_removal(spa); - - spa_spawn_aux_threads(spa); + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + vdev_initialize_restart(spa->spa_root_vdev); + vdev_trim_restart(spa->spa_root_vdev); + vdev_autotrim_restart(spa); + spa_config_exit(spa, SCL_CONFIG, FTAG); } + spa_import_progress_remove(spa_guid(spa)); spa_load_note(spa, "LOADED"); return (0); @@ -4302,6 +4424,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, * from previous txgs when spa_load fails. */ ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); + spa_import_progress_remove(spa_guid(spa)); return (load_error); } @@ -4313,6 +4436,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, if (rewind_flags & ZPOOL_NEVER_REWIND) { nvlist_free(config); + spa_import_progress_remove(spa_guid(spa)); return (load_error); } @@ -4355,6 +4479,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, if (state == SPA_LOAD_RECOVER) { ASSERT3P(loadinfo, ==, NULL); + spa_import_progress_remove(spa_guid(spa)); return (rewind_error); } else { /* Store the rewind info as part of the initial load info */ @@ -4365,6 +4490,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, fnvlist_free(spa->spa_load_info); spa->spa_load_info = loadinfo; + spa_import_progress_remove(spa_guid(spa)); return (load_error); } } @@ -5079,6 +5205,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, spa->spa_removing_phys.sr_state = DSS_NONE; spa->spa_removing_phys.sr_removing_vdev = -1; spa->spa_removing_phys.sr_prev_indirect_vdev = -1; + spa->spa_indirect_vdevs_loaded = B_TRUE; /* * Create "The Godfather" zio to hold all async IOs @@ -5241,6 +5368,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); + spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM); if (props != NULL) { spa_configfile_set(spa, props, B_FALSE); @@ -5647,6 +5775,20 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, return (SET_ERROR(EXDEV)); } + /* + * We're about to export or destroy this pool. Make sure + * we stop all initialization and trim activity here before + * we set the spa_final_txg. This will ensure that all + * dirty data resulting from the initialization is + * committed to disk before we unload the pool. + */ + if (spa->spa_root_vdev != NULL) { + vdev_t *rvd = spa->spa_root_vdev; + vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE); + vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE); + vdev_autotrim_stop_all(spa); + } + /* * We want this to be reflected on every label, * so mark them all dirty. spa_unload() will do the @@ -6059,9 +6201,14 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) /* * Schedule the resilver to restart in the future. We do this to * ensure that dmu_sync-ed blocks have been stitched into the - * respective datasets. + * respective datasets. We do not do this if resilvers have been + * deferred. 
*/ - dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); + if (dsl_scan_resilvering(spa_get_dsl(spa)) && + spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) + vdev_set_deferred_resilver(spa, newvd); + else + dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); if (spa->spa_bootfs) spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH); @@ -6262,7 +6409,6 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) vdev_remove_parent(cvd); } - /* * We don't set tvd until now because the parent we just removed * may have been the previous top-level vdev. @@ -6346,6 +6492,237 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) return (error); } +static int +spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, + list_t *vd_list) +{ + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); + + /* Look up vdev and ensure it's a leaf. */ + vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); + if (vd == NULL || vd->vdev_detached) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(ENODEV)); + } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(EINVAL)); + } else if (!vdev_writeable(vd)) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(EROFS)); + } + mutex_enter(&vd->vdev_initialize_lock); + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + + /* + * When we activate an initialize action we check to see + * if the vdev_initialize_thread is NULL. We do this instead + * of using the vdev_initialize_state since there might be + * a previous initialization process which has completed but + * the thread is not exited. + */ + if (cmd_type == POOL_INITIALIZE_START && + (vd->vdev_initialize_thread != NULL || + vd->vdev_top->vdev_removing)) { + mutex_exit(&vd->vdev_initialize_lock); + return (SET_ERROR(EBUSY)); + } else if (cmd_type == POOL_INITIALIZE_CANCEL && + (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE && + vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) { + mutex_exit(&vd->vdev_initialize_lock); + return (SET_ERROR(ESRCH)); + } else if (cmd_type == POOL_INITIALIZE_SUSPEND && + vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) { + mutex_exit(&vd->vdev_initialize_lock); + return (SET_ERROR(ESRCH)); + } + + switch (cmd_type) { + case POOL_INITIALIZE_START: + vdev_initialize(vd); + break; + case POOL_INITIALIZE_CANCEL: + vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list); + break; + case POOL_INITIALIZE_SUSPEND: + vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list); + break; + default: + panic("invalid cmd_type %llu", (unsigned long long)cmd_type); + } + mutex_exit(&vd->vdev_initialize_lock); + + return (0); +} + +int +spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, + nvlist_t *vdev_errlist) +{ + int total_errors = 0; + list_t vd_list; + + list_create(&vd_list, sizeof (vdev_t), + offsetof(vdev_t, vdev_initialize_node)); + + /* + * We hold the namespace lock through the whole function + * to prevent any changes to the pool while we're starting or + * stopping initialization. The config and state locks are held so that + * we can properly assess the vdev state before we commit to + * the initializing operation. 
+ */ + mutex_enter(&spa_namespace_lock); + + for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); + pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { + uint64_t vdev_guid = fnvpair_value_uint64(pair); + + int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type, + &vd_list); + if (error != 0) { + char guid_as_str[MAXNAMELEN]; + + (void) snprintf(guid_as_str, sizeof (guid_as_str), + "%llu", (unsigned long long)vdev_guid); + fnvlist_add_int64(vdev_errlist, guid_as_str, error); + total_errors++; + } + } + + /* Wait for all initialize threads to stop. */ + vdev_initialize_stop_wait(spa, &vd_list); + + /* Sync out the initializing state */ + txg_wait_synced(spa->spa_dsl_pool, 0); + mutex_exit(&spa_namespace_lock); + + list_destroy(&vd_list); + + return (total_errors); +} + +static int +spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, + uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list) +{ + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); + + /* Look up vdev and ensure it's a leaf. */ + vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); + if (vd == NULL || vd->vdev_detached) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(ENODEV)); + } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(EINVAL)); + } else if (!vdev_writeable(vd)) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(EROFS)); + } else if (!vd->vdev_has_trim) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(EOPNOTSUPP)); + } else if (secure && !vd->vdev_has_securetrim) { + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + return (SET_ERROR(EOPNOTSUPP)); + } + mutex_enter(&vd->vdev_trim_lock); + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + + /* + * When we activate a TRIM action we check to see if the + * vdev_trim_thread is NULL. We do this instead of using the + * vdev_trim_state since there might be a previous TRIM process + * which has completed but the thread is not exited. + */ + if (cmd_type == POOL_TRIM_START && + (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) { + mutex_exit(&vd->vdev_trim_lock); + return (SET_ERROR(EBUSY)); + } else if (cmd_type == POOL_TRIM_CANCEL && + (vd->vdev_trim_state != VDEV_TRIM_ACTIVE && + vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) { + mutex_exit(&vd->vdev_trim_lock); + return (SET_ERROR(ESRCH)); + } else if (cmd_type == POOL_TRIM_SUSPEND && + vd->vdev_trim_state != VDEV_TRIM_ACTIVE) { + mutex_exit(&vd->vdev_trim_lock); + return (SET_ERROR(ESRCH)); + } + + switch (cmd_type) { + case POOL_TRIM_START: + vdev_trim(vd, rate, partial, secure); + break; + case POOL_TRIM_CANCEL: + vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list); + break; + case POOL_TRIM_SUSPEND: + vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list); + break; + default: + panic("invalid cmd_type %llu", (unsigned long long)cmd_type); + } + mutex_exit(&vd->vdev_trim_lock); + + return (0); +} + +/* + * Initiates a manual TRIM for the requested vdevs. This kicks off individual + * TRIM threads for each child vdev. These threads pass over all of the free + * space in the vdev's metaslabs and issues TRIM commands for that space. 
+ */ +int +spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate, + boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist) +{ + int total_errors = 0; + list_t vd_list; + + list_create(&vd_list, sizeof (vdev_t), + offsetof(vdev_t, vdev_trim_node)); + + /* + * We hold the namespace lock through the whole function + * to prevent any changes to the pool while we're starting or + * stopping TRIM. The config and state locks are held so that + * we can properly assess the vdev state before we commit to + * the TRIM operation. + */ + mutex_enter(&spa_namespace_lock); + + for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); + pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { + uint64_t vdev_guid = fnvpair_value_uint64(pair); + + int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type, + rate, partial, secure, &vd_list); + if (error != 0) { + char guid_as_str[MAXNAMELEN]; + + (void) snprintf(guid_as_str, sizeof (guid_as_str), + "%llu", (unsigned long long)vdev_guid); + fnvlist_add_int64(vdev_errlist, guid_as_str, error); + total_errors++; + } + } + + /* Wait for all TRIM threads to stop. */ + vdev_trim_stop_wait(spa, &vd_list); + + /* Sync out the TRIM state */ + txg_wait_synced(spa->spa_dsl_pool, 0); + mutex_exit(&spa_namespace_lock); + + list_destroy(&vd_list); + + return (total_errors); +} + /* * Split a set of devices from their mirrors, and create a new pool from them. */ @@ -6554,6 +6931,38 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, spa_activate(newspa, spa_mode_global); spa_async_suspend(newspa); + /* + * Temporarily stop the initializing and TRIM activity. We set the + * state to ACTIVE so that we know to resume initializing or TRIM + * once the split has completed. + */ + list_t vd_initialize_list; + list_create(&vd_initialize_list, sizeof (vdev_t), + offsetof(vdev_t, vdev_initialize_node)); + + list_t vd_trim_list; + list_create(&vd_trim_list, sizeof (vdev_t), + offsetof(vdev_t, vdev_trim_node)); + + for (c = 0; c < children; c++) { + if (vml[c] != NULL) { + mutex_enter(&vml[c]->vdev_initialize_lock); + vdev_initialize_stop(vml[c], + VDEV_INITIALIZE_ACTIVE, &vd_initialize_list); + mutex_exit(&vml[c]->vdev_initialize_lock); + + mutex_enter(&vml[c]->vdev_trim_lock); + vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list); + mutex_exit(&vml[c]->vdev_trim_lock); + } + } + + vdev_initialize_stop_wait(spa, &vd_initialize_list); + vdev_trim_stop_wait(spa, &vd_trim_list); + + list_destroy(&vd_initialize_list); + list_destroy(&vd_trim_list); + newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; /* create the new pool from the disks of the original pool */ @@ -6597,6 +7006,18 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, dmu_tx_abort(tx); for (c = 0; c < children; c++) { if (vml[c] != NULL) { + vdev_t *tvd = vml[c]->vdev_top; + + /* + * Need to be sure the detachable VDEV is not + * on any *other* txg's DTL list to prevent it + * from being accessed after it's freed. 
+ */ + for (int t = 0; t < TXG_SIZE; t++) { + (void) txg_list_remove_this( + &tvd->vdev_dtl_list, vml[c], t); + } + vdev_split(vml[c]); if (error == 0) spa_history_log_internal(spa, "detach", tx, @@ -6641,6 +7062,12 @@ out: if (vml[c] != NULL) vml[c]->vdev_offline = B_FALSE; } + + /* restart initializing or trimming disks as necessary */ + spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); + spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); + spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); + vdev_reopen(spa->spa_root_vdev); nvlist_free(spa->spa_config_splitting); @@ -6856,6 +7283,10 @@ spa_scan(spa_t *spa, pool_scan_func_t func) if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) return (SET_ERROR(ENOTSUP)); + if (func == POOL_SCAN_RESILVER && + !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) + return (SET_ERROR(ENOTSUP)); + /* * If a resilver was requested, but there is no DTL on a * writeable leaf device, we have nothing to do. @@ -6933,6 +7364,7 @@ static void spa_async_thread(void *arg) { spa_t *spa = (spa_t *)arg; + dsl_pool_t *dp = spa->spa_dsl_pool; int tasks; ASSERT(spa->spa_sync_on); @@ -7008,8 +7440,34 @@ spa_async_thread(void *arg) /* * Kick off a resilver. */ - if (tasks & SPA_ASYNC_RESILVER) - dsl_resilver_restart(spa->spa_dsl_pool, 0); + if (tasks & SPA_ASYNC_RESILVER && + (!dsl_scan_resilvering(dp) || + !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) + dsl_resilver_restart(dp, 0); + + if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { + mutex_enter(&spa_namespace_lock); + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + vdev_initialize_restart(spa->spa_root_vdev); + spa_config_exit(spa, SCL_CONFIG, FTAG); + mutex_exit(&spa_namespace_lock); + } + + if (tasks & SPA_ASYNC_TRIM_RESTART) { + mutex_enter(&spa_namespace_lock); + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + vdev_trim_restart(spa->spa_root_vdev); + spa_config_exit(spa, SCL_CONFIG, FTAG); + mutex_exit(&spa_namespace_lock); + } + + if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) { + mutex_enter(&spa_namespace_lock); + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + vdev_autotrim_restart(spa); + spa_config_exit(spa, SCL_CONFIG, FTAG); + mutex_exit(&spa_namespace_lock); + } /* * Let the world know that we're done. 
@@ -7033,12 +7491,12 @@ spa_async_suspend(spa_t *spa) spa_vdev_remove_suspend(spa); zthr_t *condense_thread = spa->spa_condense_zthr; - if (condense_thread != NULL && zthr_isrunning(condense_thread)) - VERIFY0(zthr_cancel(condense_thread)); + if (condense_thread != NULL) + zthr_cancel(condense_thread); zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; - if (discard_thread != NULL && zthr_isrunning(discard_thread)) - VERIFY0(zthr_cancel(discard_thread)); + if (discard_thread != NULL) + zthr_cancel(discard_thread); } void @@ -7051,11 +7509,11 @@ spa_async_resume(spa_t *spa) spa_restart_removal(spa); zthr_t *condense_thread = spa->spa_condense_zthr; - if (condense_thread != NULL && !zthr_isrunning(condense_thread)) + if (condense_thread != NULL) zthr_resume(condense_thread); zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; - if (discard_thread != NULL && !zthr_isrunning(discard_thread)) + if (discard_thread != NULL) zthr_resume(discard_thread); } @@ -7144,6 +7602,9 @@ spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) static void spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) { + if (spa_sync_pass(spa) != 1) + return; + zio_t *zio = zio_root(spa, NULL, NULL, 0); VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, spa_free_sync_cb, zio, tx), ==, 0); @@ -7507,6 +7968,11 @@ spa_sync_props(void *arg, dmu_tx_t *tx) case ZPOOL_PROP_FAILUREMODE: spa->spa_failmode = intval; break; + case ZPOOL_PROP_AUTOTRIM: + spa->spa_autotrim = intval; + spa_async_request(spa, + SPA_ASYNC_AUTOTRIM_RESTART); + break; case ZPOOL_PROP_AUTOEXPAND: spa->spa_autoexpand = intval; if (tx->tx_txg != TXG_INITIAL) @@ -7539,10 +8005,10 @@ spa_sync_props(void *arg, dmu_tx_t *tx) static void spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) { - dsl_pool_t *dp = spa->spa_dsl_pool; - - ASSERT(spa->spa_sync_pass == 1); + if (spa_sync_pass(spa) != 1) + return; + dsl_pool_t *dp = spa->spa_dsl_pool; rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && @@ -7614,14 +8080,15 @@ vdev_indirect_state_sync_verify(vdev_t *vd) ASSERT(vib != NULL); } - if (vdev_obsolete_sm_object(vd) != 0) { + uint64_t obsolete_sm_object = 0; + ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object)); + if (obsolete_sm_object != 0) { ASSERT(vd->vdev_obsolete_sm != NULL); ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops); ASSERT(vdev_indirect_mapping_num_entries(vim) > 0); ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0); - - ASSERT3U(vdev_obsolete_sm_object(vd), ==, + ASSERT3U(obsolete_sm_object, ==, space_map_object(vd->vdev_obsolete_sm)); ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=, space_map_allocated(vd->vdev_obsolete_sm)); @@ -7637,117 +8104,31 @@ vdev_indirect_state_sync_verify(vdev_t *vd) } /* - * Sync the specified transaction group. New blocks may be dirtied as - * part of the process, so we iterate until it converges. + * Set the top-level vdev's max queue depth. Evaluate each top-level's + * async write queue depth in case it changed. The max queue depth will + * not change in the middle of syncing out this txg. 
*/ -void -spa_sync(spa_t *spa, uint64_t txg) +static void +spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) { - dsl_pool_t *dp = spa->spa_dsl_pool; - objset_t *mos = spa->spa_meta_objset; - bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; - metaslab_class_t *normal = spa_normal_class(spa); - metaslab_class_t *special = spa_special_class(spa); - metaslab_class_t *dedup = spa_dedup_class(spa); + ASSERT(spa_writeable(spa)); + vdev_t *rvd = spa->spa_root_vdev; - vdev_t *vd; - dmu_tx_t *tx; - int error; uint32_t max_queue_depth = zfs_vdev_async_write_max_active * zfs_vdev_queue_depth_pct / 100; + metaslab_class_t *normal = spa_normal_class(spa); + metaslab_class_t *special = spa_special_class(spa); + metaslab_class_t *dedup = spa_dedup_class(spa); - VERIFY(spa_writeable(spa)); - - /* - * Wait for i/os issued in open context that need to complete - * before this txg syncs. - */ - VERIFY0(zio_wait(spa->spa_txg_zio[txg & TXG_MASK])); - spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 0); - - /* - * Lock out configuration changes. - */ - spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); - - spa->spa_syncing_txg = txg; - spa->spa_sync_pass = 0; - - for (int i = 0; i < spa->spa_alloc_count; i++) { - mutex_enter(&spa->spa_alloc_locks[i]); - VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i])); - mutex_exit(&spa->spa_alloc_locks[i]); - } - - /* - * If there are any pending vdev state changes, convert them - * into config changes that go out with this transaction group. - */ - spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); - while (list_head(&spa->spa_state_dirty_list) != NULL) { - /* - * We need the write lock here because, for aux vdevs, - * calling vdev_config_dirty() modifies sav_config. - * This is ugly and will become unnecessary when we - * eliminate the aux vdev wart by integrating all vdevs - * into the root vdev tree. - */ - spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); - spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); - while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { - vdev_state_clean(vd); - vdev_config_dirty(vd); - } - spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); - spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); - } - spa_config_exit(spa, SCL_STATE, FTAG); - - tx = dmu_tx_create_assigned(dp, txg); - - spa->spa_sync_starttime = gethrtime(); - taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); - spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, - spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + - NSEC_TO_TICK(spa->spa_deadman_synctime)); - - /* - * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, - * set spa_deflate if we have no raid-z vdevs. - */ - if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && - spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { - int i; - - for (i = 0; i < rvd->vdev_children; i++) { - vd = rvd->vdev_child[i]; - if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) - break; - } - if (i == rvd->vdev_children) { - spa->spa_deflate = TRUE; - VERIFY(0 == zap_add(spa->spa_meta_objset, - DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, - sizeof (uint64_t), 1, &spa->spa_deflate, tx)); - } - } - - /* - * Set the top-level vdev's max queue depth. Evaluate each - * top-level's async write queue depth in case it changed. - * The max queue depth will not change in the middle of syncing - * out this txg. 
- */ uint64_t slots_per_allocator = 0; for (int c = 0; c < rvd->vdev_children; c++) { vdev_t *tvd = rvd->vdev_child[c]; - metaslab_group_t *mg = tvd->vdev_mg; - metaslab_class_t *mc; + metaslab_group_t *mg = tvd->vdev_mg; if (mg == NULL || !metaslab_group_initialized(mg)) continue; - mc = mg->mg_class; + metaslab_class_t *mc = mg->mg_class; if (mc != normal && mc != special && mc != dedup) continue; @@ -7779,7 +8160,14 @@ spa_sync(spa_t *spa, uint64_t txg) normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; +} + +static void +spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) +{ + ASSERT(spa_writeable(spa)); + vdev_t *rvd = spa->spa_root_vdev; for (int c = 0; c < rvd->vdev_children; c++) { vdev_t *vd = rvd->vdev_child[c]; vdev_indirect_state_sync_verify(vd); @@ -7789,10 +8177,16 @@ spa_sync(spa_t *spa, uint64_t txg) break; } } +} + +static void +spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) +{ + objset_t *mos = spa->spa_meta_objset; + dsl_pool_t *dp = spa->spa_dsl_pool; + uint64_t txg = tx->tx_txg; + bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; - /* - * Iterate to convergence. - */ do { int pass = ++spa->spa_sync_pass; @@ -7818,81 +8212,60 @@ spa_sync(spa_t *spa, uint64_t txg) ddt_sync(spa, txg); dsl_scan_sync(dp, tx); + svr_sync(spa, tx); + spa_sync_upgrades(spa, tx); - if (spa->spa_vdev_removal != NULL) - svr_sync(spa, tx); - + vdev_t *vd = NULL; while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL) vdev_sync(vd, txg); - if (pass == 1) { - spa_sync_upgrades(spa, tx); - ASSERT3U(txg, >=, - spa->spa_uberblock.ub_rootbp.blk_birth); + /* + * Note: We need to check if the MOS is dirty because we could + * have marked the MOS dirty without updating the uberblock + * (e.g. if we have sync tasks but no dirty user data). We need + * to check the uberblock's rootbp because it is updated if we + * have synced out dirty data (though in this case the MOS will + * most likely also be dirty due to second order effects, we + * don't want to rely on that here). + */ + if (pass == 1 && + spa->spa_uberblock.ub_rootbp.blk_birth < txg && + !dmu_objset_is_dirty(mos, txg)) { /* - * Note: We need to check if the MOS is dirty - * because we could have marked the MOS dirty - * without updating the uberblock (e.g. if we - * have sync tasks but no dirty user data). We - * need to check the uberblock's rootbp because - * it is updated if we have synced out dirty - * data (though in this case the MOS will most - * likely also be dirty due to second order - * effects, we don't want to rely on that here). + * Nothing changed on the first pass, therefore this + * TXG is a no-op. Avoid syncing deferred frees, so + * that we can keep this TXG as a no-op. */ - if (spa->spa_uberblock.ub_rootbp.blk_birth < txg && - !dmu_objset_is_dirty(mos, txg)) { - /* - * Nothing changed on the first pass, - * therefore this TXG is a no-op. Avoid - * syncing deferred frees, so that we - * can keep this TXG as a no-op. 
- */ - ASSERT(txg_list_empty(&dp->dp_dirty_datasets, - txg)); - ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); - ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); - ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, - txg)); - break; - } - spa_sync_deferred_frees(spa, tx); + ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); + ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); + ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); + ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg)); + break; } + spa_sync_deferred_frees(spa, tx); } while (dmu_objset_is_dirty(mos, txg)); +} -#ifdef ZFS_DEBUG - if (!list_is_empty(&spa->spa_config_dirty_list)) { - /* - * Make sure that the number of ZAPs for all the vdevs matches - * the number of ZAPs in the per-vdev ZAP list. This only gets - * called if the config is dirty; otherwise there may be - * outstanding AVZ operations that weren't completed in - * spa_sync_config_object. - */ - uint64_t all_vdev_zap_entry_count; - ASSERT0(zap_count(spa->spa_meta_objset, - spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); - ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, - all_vdev_zap_entry_count); - } -#endif - - if (spa->spa_vdev_removal != NULL) { - ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); - } +/* + * Rewrite the vdev configuration (which includes the uberblock) to + * commit the transaction group. + * + * If there are no dirty vdevs, we sync the uberblock to a few random + * top-level vdevs that are known to be visible in the config cache + * (see spa_vdev_add() for a complete description). If there *are* dirty + * vdevs, sync the uberblock to all vdevs. + */ +static void +spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) +{ + vdev_t *rvd = spa->spa_root_vdev; + uint64_t txg = tx->tx_txg; - /* - * Rewrite the vdev configuration (which includes the uberblock) - * to commit the transaction group. - * - * If there are no dirty vdevs, we sync the uberblock to a few - * random top-level vdevs that are known to be visible in the - * config cache (see spa_vdev_add() for a complete description). - * If there *are* dirty vdevs, sync the uberblock to all vdevs. - */ for (;;) { + int error = 0; + /* * We hold SCL_STATE to prevent vdev open/close/etc. * while we're attempting to write the vdev labels. @@ -7906,13 +8279,15 @@ spa_sync(spa_t *spa, uint64_t txg) int c0 = spa_get_random(children); for (int c = 0; c < children; c++) { - vd = rvd->vdev_child[(c0 + c) % children]; + vdev_t *vd = + rvd->vdev_child[(c0 + c) % children]; /* Stop when revisiting the first vdev */ if (c > 0 && svd[0] == vd) break; - if (vd->vdev_ms_array == 0 || vd->vdev_islog || + if (vd->vdev_ms_array == 0 || + vd->vdev_islog || !vdev_is_concrete(vd)) continue; @@ -7936,6 +8311,124 @@ spa_sync(spa_t *spa, uint64_t txg) zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR); zio_resume_wait(spa); } +} + +/* + * Sync the specified transaction group. New blocks may be dirtied as + * part of the process, so we iterate until it converges. + */ +void +spa_sync(spa_t *spa, uint64_t txg) +{ + vdev_t *vd = NULL; + + VERIFY(spa_writeable(spa)); + + /* + * Wait for i/os issued in open context that need to complete + * before this txg syncs. + */ + (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]); + spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, + ZIO_FLAG_CANFAIL); + + /* + * Lock out configuration changes. 
+ */ + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + + spa->spa_syncing_txg = txg; + spa->spa_sync_pass = 0; + + for (int i = 0; i < spa->spa_alloc_count; i++) { + mutex_enter(&spa->spa_alloc_locks[i]); + VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i])); + mutex_exit(&spa->spa_alloc_locks[i]); + } + + /* + * If there are any pending vdev state changes, convert them + * into config changes that go out with this transaction group. + */ + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + while (list_head(&spa->spa_state_dirty_list) != NULL) { + /* + * We need the write lock here because, for aux vdevs, + * calling vdev_config_dirty() modifies sav_config. + * This is ugly and will become unnecessary when we + * eliminate the aux vdev wart by integrating all vdevs + * into the root vdev tree. + */ + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); + while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { + vdev_state_clean(vd); + vdev_config_dirty(vd); + } + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); + } + spa_config_exit(spa, SCL_STATE, FTAG); + + dsl_pool_t *dp = spa->spa_dsl_pool; + dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); + + spa->spa_sync_starttime = gethrtime(); + taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); + spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, + spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + + NSEC_TO_TICK(spa->spa_deadman_synctime)); + + /* + * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, + * set spa_deflate if we have no raid-z vdevs. + */ + if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && + spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { + vdev_t *rvd = spa->spa_root_vdev; + + int i; + for (i = 0; i < rvd->vdev_children; i++) { + vd = rvd->vdev_child[i]; + if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) + break; + } + if (i == rvd->vdev_children) { + spa->spa_deflate = TRUE; + VERIFY0(zap_add(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, + sizeof (uint64_t), 1, &spa->spa_deflate, tx)); + } + } + + spa_sync_adjust_vdev_max_queue_depth(spa); + + spa_sync_condense_indirect(spa, tx); + + spa_sync_iterate_to_convergence(spa, tx); + +#ifdef ZFS_DEBUG + if (!list_is_empty(&spa->spa_config_dirty_list)) { + /* + * Make sure that the number of ZAPs for all the vdevs matches + * the number of ZAPs in the per-vdev ZAP list. This only gets + * called if the config is dirty; otherwise there may be + * outstanding AVZ operations that weren't completed in + * spa_sync_config_object. + */ + uint64_t all_vdev_zap_entry_count; + ASSERT0(zap_count(spa->spa_meta_objset, + spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); + ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, + all_vdev_zap_entry_count); + } +#endif + + if (spa->spa_vdev_removal != NULL) { + ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); + } + + spa_sync_rewrite_vdev_config(spa, tx); dmu_tx_commit(tx); taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); @@ -7968,7 +8461,8 @@ spa_sync(spa_t *spa, uint64_t txg) /* * Update usable space statistics. */ - while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))) + while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) + != NULL) vdev_sync_done(vd, txg); spa_update_dspace(spa);
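
The spa_activity_check_duration() function added above picks the multihost activity-check wait time from one of three sources: the remote host's recorded fail interval (after which that host suspends the pool itself), the remote host's MMP write cadence, or, failing both, the local zfs_multihost_* tunables. The stand-alone sketch below illustrates only that selection arithmetic; the parameter names, the 120% margin, and the reduction of the MMP_* uberblock macros to plain arguments are hypothetical stand-ins, not the in-tree interfaces.

#include <stdint.h>
#include <stdio.h>

#define	NANOSEC		1000000000ULL
#define	MSEC2NSEC(ms)	((uint64_t)(ms) * 1000000ULL)

/*
 * Hypothetical, stand-alone reduction of the duration selection done by
 * spa_activity_check_duration().  The MMP_* macros, uberblock fields, and
 * MMP_IMPORT_SAFETY_FACTOR are replaced by plain parameters and a stand-in
 * 120% margin; only the arithmetic is illustrated here.
 */
static uint64_t
activity_check_duration(uint64_t local_interval_ms, uint64_t import_intervals,
    int remote_mmp_valid, int remote_fail_valid,
    uint64_t remote_fail_intervals, uint64_t remote_interval_ms,
    uint64_t remote_mmp_delay_ns)
{
	uint64_t intervals = (import_intervals > 0) ? import_intervals : 1;

	/* Floor derived from local tunables, never less than one second. */
	uint64_t delay = intervals * MSEC2NSEC(local_interval_ms);
	if (delay < NANOSEC)
		delay = NANOSEC;

	if (remote_mmp_valid && remote_fail_valid &&
	    remote_fail_intervals > 0) {
		/*
		 * The remote host suspends the pool after this many missed
		 * MMP writes, so wait that long plus a safety margin.
		 */
		delay = remote_fail_intervals *
		    MSEC2NSEC(remote_interval_ms) * 120 / 100;
	} else if (remote_mmp_valid && remote_fail_valid) {
		/* Remote host never suspends; scale by its write cadence. */
		uint64_t per_write = MSEC2NSEC(remote_interval_ms) +
		    remote_mmp_delay_ns;
		if (per_write * intervals > delay)
			delay = per_write * intervals;
	} else if (remote_mmp_valid) {
		/* zfs-0.7-era uberblock: only the write delay is known. */
		uint64_t per_write = MSEC2NSEC(local_interval_ms) +
		    remote_mmp_delay_ns;
		if (per_write * intervals > delay)
			delay = per_write * intervals;
	}

	return (delay);
}

int
main(void)
{
	/* Remote host wrote every 1000 ms and suspends after 10 misses. */
	printf("wait %llu ns\n", (unsigned long long)
	    activity_check_duration(1000, 20, 1, 1, 10, 1000, 0));
	return (0);
}

Whatever duration is chosen, spa_activity_check() above then pads it by a random 0-25% (import_delay * spa_get_random(250) / 1000) so that two hosts attempting a simultaneous import do not probe the uberblocks in lock-step.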