/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified dsl_pool_willuse_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_dirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
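
/*
 * Illustrative example (not part of the code): at module load, arc_init()
 * derives the limit from the tunables above, conceptually:
 *
 *	zfs_dirty_data_max = MIN(allmem * zfs_dirty_data_max_percent / 100,
 *	    zfs_dirty_data_max_max);
 *
 * e.g. on a hypothetical machine with 32 GiB of RAM and the defaults:
 * 32 GiB * 10 / 100 = 3.2 GiB, capped by
 * zfs_dirty_data_max_max = 32 GiB * 25 / 100 = 8 GiB.
 */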

/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
int zfs_dirty_data_sync_percent = 20;

/*
 * Once there is this amount of dirty data, the dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
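
/*
 * Worked example (illustrative): the default above corresponds to a backend
 * capable of roughly 2000 operations per second:
 *
 *	zfs_delay_scale = 1,000,000,000 / 2000 = 500,000
 *
 * and comfortably satisfies the overflow constraint: with a 4 GiB
 * zfs_dirty_data_max, 500,000 * 2^32 is about 2^51, well below 2^64.
 */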

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea being, we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
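
/*
 * Sketch of the dispatch/fallback pattern described above (paraphrased
 * from zil_clean() in zil.c; see that function for the real code):
 *
 *	if (taskq_dispatch(dp->dp_zil_clean_taskq,
 *	    (task_func_t *)zil_itxg_clean, itxs, TQ_NOSLEEP) ==
 *	    TASKQID_INVALID)
 *		zil_itxg_clean(itxs);	<-- slow path, runs in spa_sync()
 */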

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
	objset_t *os;
#else
	objset_t *os __attribute__((unused));
#endif
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
#endif

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(); there is
	 * also rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 * - move dead blocks from the pending deadlist to the on-disk deadlist
	 * - release hold from dsl_dataset_dirty()
	 * - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}

uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
	return (quota);
}
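
/*
 * Illustrative example: with 100 GiB of dspace, no checkpoint or deferred
 * frees, and an assumed slop reservation of 3.2 GiB, the adjusted size is
 * 96.8 GiB under ZFS_SPACE_CHECK_NORMAL, 98.4 GiB under RESERVED (slop
 * halved), 99.2 GiB under EXTRA_RESERVED (slop quartered), and 100 GiB
 * under NONE.
 */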

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > dirty_min_bytes)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}
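
/*
 * Worked example (illustrative): with zfs_dirty_data_max = 4 GiB and the
 * default tunables, dirty_min_bytes = 4 GiB * 20 / 100 = ~819 MiB (at which
 * point a txg sync is kicked off) and delay_min_bytes = 4 GiB * 60 / 100 =
 * 2.4 GiB (at which point dmu_tx_delay() starts delaying transactions).
 */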

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();
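
	/*
	 * Each entry's name has the form "<dsobj in hex>-<tag>" (built by
	 * dsl_pool_user_hold_rele_impl() below), e.g. a hypothetical
	 * "36-send_hold", so splitting at the first '-' recovers the
	 * dataset object number and the hold tag.
	 */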
	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running, cancelable
 * tasks that should cause "zfs destroy" to fail. This includes DMU
 * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */
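
/*
 * A minimal sketch of the long-hold formula above, with error handling
 * (illustrative only; "myhold" and the dataset name are placeholders):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *	void *myhold = FTAG;
 *	int error;
 *
 *	error = dsl_pool_hold("tank/fs", myhold, &dp);
 *	if (error != 0)
 *		return (error);
 *	error = dsl_dataset_hold(dp, "tank/fs", myhold, &ds);
 *	if (error != 0) {
 *		dsl_pool_rele(dp, myhold);
 *		return (error);
 *	}
 *	dsl_dataset_long_hold(ds, myhold);
 *	dsl_pool_rele(dp, myhold);	<-- drop the lock; "zfs destroy"
 *					    now fails with EBUSY
 *	... perform long-running task ...
 *	dsl_dataset_long_rele(ds, myhold);
 *	dsl_dataset_rele(ds, myhold);
 */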

int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync_percent, int, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync_percent,
	"dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");

module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
	"max percent of CPUs that are used to sync dirty data");

module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
	"max percent of CPUs that are used per dp_zil_clean_taskq");

module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
	"number of taskq entries that are pre-populated");

module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
	"max number of taskq entries that are cached");
/* END CSTYLED */
#endif