Make zio_taskq_batch_pct user configurable

diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 3312c301cb9a5c5c1282ee3d05e607c1d26a5900..b4831a724c36554deb249c32391063616621d099 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -127,9 +127,9 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
        /* ISSUE        ISSUE_HIGH      INTR            INTR_HIGH */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* NULL */
-       { ZTI_N(8),     ZTI_NULL,       ZTI_BATCH,      ZTI_NULL }, /* READ */
-       { ZTI_BATCH,    ZTI_N(5),       ZTI_N(16),      ZTI_N(5) }, /* WRITE */
-       { ZTI_P(4, 8),  ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* FREE */
+       { ZTI_N(8),     ZTI_NULL,       ZTI_P(12, 8),   ZTI_NULL }, /* READ */
+       { ZTI_BATCH,    ZTI_N(5),       ZTI_P(12, 8),   ZTI_N(5) }, /* WRITE */
+       { ZTI_P(12, 8), ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* FREE */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* CLAIM */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* IOCTL */
 };
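
For context, these entries are consumed by spa_taskqs_init() further down. A sketch of the ZTI_* macros defined earlier in spa.c (quoted from the surrounding file as an assumption, not part of this hunk):

    #define ZTI_P(n, q)  { ZTI_MODE_FIXED, (n), (q) }  /* q taskqs, n threads each */
    #define ZTI_N(n)     ZTI_P(n, 1)                   /* one taskq of n threads */
    #define ZTI_ONE      ZTI_N(1)
    #define ZTI_BATCH    { ZTI_MODE_BATCH, 0, 1 }      /* one taskq, sized as a CPU percentage */
    #define ZTI_NULL     { ZTI_MODE_NULL, 0, 0 }

Read that way, ZTI_P(12, 8) for READ/INTR replaces the single batch taskq with eight taskqs of twelve threads each, spreading interrupt processing across more, smaller queues.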
@@ -266,6 +266,14 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
                    0, ZPROP_SRC_LOCAL);
 
+       if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
+               spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+                   MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
+       } else {
+               spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+                   SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
+       }
+
        if ((dp = list_head(&spa->spa_config_list)) != NULL) {
                if (dp->scd_path == NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
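
The values exported here come from pool-wide constants; assuming the spa.h definitions in effect at this point (a hedged sketch of the headers, not part of this diff):

    #define SPA_OLD_MAXBLOCKSHIFT  17  /* 128K, the limit before large_blocks */
    #define SPA_MAXBLOCKSHIFT      24  /* 16M, with the large_blocks feature */
    #define SPA_OLD_MAXBLOCKSIZE   (1ULL << SPA_OLD_MAXBLOCKSHIFT)
    #define SPA_MAXBLOCKSIZE       (1ULL << SPA_MAXBLOCKSHIFT)

The MIN() against zfs_max_recordsize lets an administrator cap the advertised maximum below 16M (the Linux module default is 1M), while pools without the large_blocks feature keep reporting the old 128K limit.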
@@ -482,7 +490,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 
                        if (!error) {
                                objset_t *os;
-                               uint64_t compress;
+                               uint64_t propval;
 
                                if (strval == NULL || strval[0] == '\0') {
                                        objnum = zpool_prop_default_numeric(
@@ -494,15 +502,25 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
                                if (error)
                                        break;
 
-                               /* Must be ZPL and not gzip compressed. */
+                               /*
+                                * Must be ZPL, and its property settings
+                                * must be supported by GRUB (compression
+                                * is not gzip, and large blocks are not used).
+                                */
 
                                if (dmu_objset_type(os) != DMU_OST_ZFS) {
                                        error = SET_ERROR(ENOTSUP);
                                } else if ((error =
                                    dsl_prop_get_int_ds(dmu_objset_ds(os),
                                    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
-                                   &compress)) == 0 &&
-                                   !BOOTFS_COMPRESS_VALID(compress)) {
+                                   &propval)) == 0 &&
+                                   !BOOTFS_COMPRESS_VALID(propval)) {
+                                       error = SET_ERROR(ENOTSUP);
+                               } else if ((error =
+                                   dsl_prop_get_int_ds(dmu_objset_ds(os),
+                                   zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
+                                   &propval)) == 0 &&
+                                   propval > SPA_OLD_MAXBLOCKSIZE) {
                                        error = SET_ERROR(ENOTSUP);
                                } else {
                                        objnum = dmu_objset_id(os);
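
Condensed, the post-change bootfs validation asks three questions of the candidate dataset; a hedged sketch with a hypothetical helper name (not code from this patch):

    /* Sketch: can GRUB read this dataset as a boot filesystem? */
    static int
    bootfs_grub_compatible(objset_t *os)    /* hypothetical helper */
    {
            uint64_t propval;
            int error;

            if (dmu_objset_type(os) != DMU_OST_ZFS)
                    return (SET_ERROR(ENOTSUP));    /* must be ZPL */
            error = dsl_prop_get_int_ds(dmu_objset_ds(os),
                zfs_prop_to_name(ZFS_PROP_COMPRESSION), &propval);
            if (error == 0 && !BOOTFS_COMPRESS_VALID(propval))
                    return (SET_ERROR(ENOTSUP));    /* no gzip */
            error = dsl_prop_get_int_ds(dmu_objset_ds(os),
                zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &propval);
            if (error == 0 && propval > SPA_OLD_MAXBLOCKSIZE)
                    return (SET_ERROR(ENOTSUP));    /* no large blocks */
            return (error);
    }

Note that the check inspects the recordsize property rather than existing blocks, so a dataset whose recordsize is currently above 128K is rejected even if no large block has actually been written.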
@@ -826,7 +844,7 @@ spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
        uint_t count = ztip->zti_count;
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        char name[32];
-       uint_t i, flags = 0;
+       uint_t i, flags = TASKQ_DYNAMIC;
        boolean_t batch = B_FALSE;
 
        if (mode == ZTI_MODE_NULL) {
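
Seeding flags with TASKQ_DYNAMIC means every zio taskq now starts small and only spawns threads, up to its creation-time maximum, when work actually queues up. An illustrative call, assuming the SPL's taskq_create() semantics (name and numbers are made up):

    tq = taskq_create("z_rd_int_0", 12, maxclsyspri, 50, INT_MAX,
        TASKQ_DYNAMIC);     /* may run far fewer than 12 threads while idle */

Without the flag, the eight-by-twelve READ/INTR configuration above would keep 96 interrupt threads alive per pool even on an idle system.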
@@ -849,7 +867,7 @@ spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
        case ZTI_MODE_BATCH:
                batch = B_TRUE;
                flags |= TASKQ_THREADS_CPU_PCT;
-               value = zio_taskq_batch_pct;
+               value = MIN(zio_taskq_batch_pct, 100);
                break;
 
        default:
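
Under TASKQ_THREADS_CPU_PCT, value is a percentage of online CPUs rather than an absolute thread count, so the new MIN() keeps a user-supplied zio_taskq_batch_pct from requesting more than 100%. The resulting thread count is roughly (assuming the SPL's percentage math):

    nthreads = MAX(1, (num_online_cpus() * value) / 100);

For example, a clamped value of 75 on an 8-CPU machine yields 6 batch threads.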
@@ -880,11 +898,13 @@ spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
                        pri_t pri = maxclsyspri;
                        /*
                         * The write issue taskq can be extremely CPU
-                        * intensive.  Run it at slightly lower priority
-                        * than the other taskqs.
+                        * intensive.  Run it at slightly less important
+                        * priority than the other taskqs.  Under Linux this
+                        * means incrementing the priority value; on platforms
+                        * like illumos it should be decremented.
                         */
                        if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
-                               pri--;
+                               pri++;
 
                        tq = taskq_create_proc(name, value, pri, 50,
                            INT_MAX, spa->spa_proc, flags);
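
The direction of that adjustment differs per platform because the numeric priority scales run opposite ways; schematically (illustrative only, the Linux port simply increments):

    pri_t pri = maxclsyspri;
    #ifdef __linux__
            pri++;  /* SPL: a larger value maps to a less important thread */
    #else
            pri--;  /* illumos: a smaller value is less important */
    #endif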
@@ -1755,6 +1775,7 @@ static boolean_t
 spa_check_logs(spa_t *spa)
 {
        boolean_t rv = B_FALSE;
+       dsl_pool_t *dp = spa_get_dsl(spa);
 
        switch (spa->spa_log_state) {
        default:
@@ -1762,8 +1783,8 @@ spa_check_logs(spa_t *spa)
        case SPA_LOG_MISSING:
                /* need to recheck in case slog has been restored */
        case SPA_LOG_UNKNOWN:
-               rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
-                   NULL, DS_FIND_CHILDREN) != 0);
+               rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
+                   zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
                if (rv)
                        spa_set_log_state(spa, SPA_LOG_MISSING);
                break;
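
dmu_objset_find_dp() walks the dataset hierarchy by dsl_dir object number under the pool's dsl_pool_t and hands each callback a held dsl_dataset_t, instead of repeatedly resolving names the way dmu_objset_find() does. Its signature at the time of this change (hedged, from the corresponding dmu.h):

    int dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
        int func(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg),
        void *arg, int flags);

The zil_claim walk further down receives the same conversion, which assumes zil_check_log_chain and zil_claim have been updated to the (dsl_pool_t *, dsl_dataset_t *, void *) callback form in zil.c.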
@@ -2226,6 +2247,8 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
                return (error);
 
        ASSERT(spa->spa_root_vdev == rvd);
+       ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
+       ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
 
        if (type != SPA_IMPORT_ASSEMBLE) {
                ASSERT(spa_guid(spa) == pool_guid);
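
These assertions pin the ashifts discovered during load to the pool's block-size envelope; assuming the usual spa.h constants:

    #define SPA_MINBLOCKSHIFT  9   /* 512B, the smallest allocatable unit */
    #define SPA_MAXBLOCKSHIFT  24  /* 16M, raised by the large_blocks feature */

i.e. every imported vdev must report a sector shift between 9 (512B) and 24 (16M).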
@@ -2745,6 +2768,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
            spa->spa_load_max_txg == UINT64_MAX)) {
                dmu_tx_t *tx;
                int need_update = B_FALSE;
+               dsl_pool_t *dp = spa_get_dsl(spa);
                int c;
 
                ASSERT(state != SPA_LOAD_TRYIMPORT);
@@ -2758,9 +2782,8 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
                 */
                spa->spa_claiming = B_TRUE;
 
-               tx = dmu_tx_create_assigned(spa_get_dsl(spa),
-                   spa_first_txg(spa));
-               (void) dmu_objset_find(spa_name(spa),
+               tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
+               (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
                    zil_claim, tx, DS_FIND_CHILDREN);
                dmu_tx_commit(tx);
 
@@ -6372,21 +6395,6 @@ spa_sync(spa_t *spa, uint64_t txg)
                }
        }
 
-       /*
-        * If anything has changed in this txg, or if someone is waiting
-        * for this txg to sync (eg, spa_vdev_remove()), push the
-        * deferred frees from the previous txg.  If not, leave them
-        * alone so that we don't generate work on an otherwise idle
-        * system.
-        */
-       if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
-           !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
-           !txg_list_empty(&dp->dp_sync_tasks, txg) ||
-           ((dsl_scan_active(dp->dp_scan) ||
-           txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
-               spa_sync_deferred_frees(spa, tx);
-       }
-
        /*
         * Iterate to convergence.
         */
@@ -6404,6 +6412,11 @@ spa_sync(spa_t *spa, uint64_t txg)
                if (pass < zfs_sync_pass_deferred_free) {
                        spa_sync_frees(spa, free_bpl, tx);
                } else {
+                       /*
+                        * We cannot defer frees in pass 1, because
+                        * we sync the deferred frees later in pass 1.
+                        */
+                       ASSERT3U(pass, >, 1);
                        bplist_iterate(free_bpl, bpobj_enqueue_cb,
                            &spa->spa_deferred_bpobj, tx);
                }
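
The pass threshold here is a tunable defined elsewhere; for context (assuming its definition in zio.c):

    int zfs_sync_pass_deferred_free = 2;    /* defer frees starting in this pass */

With the default of 2, frees issued in pass 1 are executed immediately, while frees discovered in later passes are queued on spa_deferred_bpobj for a future txg; the new ASSERT documents that this enqueueing branch can therefore never run in pass 1, where the deferred frees themselves are synced (see the pass == 1 block below).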
@@ -6414,8 +6427,37 @@ spa_sync(spa_t *spa, uint64_t txg)
                while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
                        vdev_sync(vd, txg);
 
-               if (pass == 1)
+               if (pass == 1) {
                        spa_sync_upgrades(spa, tx);
+                       ASSERT3U(txg, >=,
+                           spa->spa_uberblock.ub_rootbp.blk_birth);
+                       /*
+                        * Note: We need to check if the MOS is dirty
+                        * because we could have marked the MOS dirty
+                        * without updating the uberblock (e.g. if we
+                        * have sync tasks but no dirty user data).  We
+                        * need to check the uberblock's rootbp because
+                        * it is updated if we have synced out dirty
+                        * data (though in this case the MOS will most
+                        * likely also be dirty due to second order
+                        * effects, we don't want to rely on that here).
+                        */
+                       if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
+                           !dmu_objset_is_dirty(mos, txg)) {
+                               /*
+                                * Nothing changed on the first pass,
+                                * therefore this TXG is a no-op.  Avoid
+                                * syncing deferred frees, so that we
+                                * can keep this TXG as a no-op.
+                                */
+                               ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
+                                   txg));
+                               ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
+                               ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
+                               break;
+                       }
+                       spa_sync_deferred_frees(spa, tx);
+               }
 
        } while (dmu_objset_is_dirty(mos, txg));
 
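Together with the block deleted before the loop above, the net effect is to move the deferred-free push inside pass 1, after first checking whether this txg dirtied anything at all. The restructured flow, condensed (a control-flow sketch, not the literal code):

    do {
            pass++;
            /* ... sync dirty datasets, dirs and vdevs ... */
            if (pass == 1) {
                    spa_sync_upgrades(spa, tx);
                    if (/* rootbp not updated this txg && MOS clean */)
                            break;  /* nothing happened; keep the txg a no-op */
                    spa_sync_deferred_frees(spa, tx);
            }
    } while (dmu_objset_is_dirty(mos, txg));

This replaces the old up-front heuristic (dirty lists, scan activity, txg_sync_waiting) with a direct observation of whether pass 1 wrote anything, which is what keeps an otherwise idle pool from generating new work, and hence new frees, for itself every txg.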
@@ -6762,4 +6804,9 @@ MODULE_PARM_DESC(spa_load_verify_metadata,
 module_param(spa_load_verify_data, int, 0644);
 MODULE_PARM_DESC(spa_load_verify_data,
        "Set to traverse data on pool import");
+
+module_param(zio_taskq_batch_pct, uint, 0444);
+MODULE_PARM_DESC(zio_taskq_batch_pct,
+       "Percentage of CPUs to run an IO worker thread");
+
 #endif
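
Because the parameter is registered with mode 0444, it is visible but not writable through /sys/module/zfs/parameters/zio_taskq_batch_pct and must be set at module load time (e.g. an options line in /etc/modprobe.d/zfs.conf). The variable itself lives in zio.c, roughly as follows (the default shown is an assumption; it has varied across releases):

    uint_t zio_taskq_batch_pct = 75;    /* pct of online CPUs per batch taskq */

Values above 100 are tolerated because spa_taskqs_init() now clamps with MIN(zio_taskq_batch_pct, 100), as shown earlier in this diff.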