/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example,
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
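
/*
 * Illustrative sketch (not part of the original source): how the default
 * described above would be derived at module load. The real computation
 * lives in arc_init() (see the module_param comments at the bottom of this
 * file); the helper name and the physmem_bytes parameter are hypothetical.
 */
#if 0
static unsigned long
example_default_dirty_data_max(unsigned long physmem_bytes)
{
	/* zfs_dirty_data_max_percent% (10%) of all memory ... */
	unsigned long val = physmem_bytes / 100 * zfs_dirty_data_max_percent;
	/* ... capped at zfs_dirty_data_max_max (25% of memory by default) */
	unsigned long cap = physmem_bytes / 100 * zfs_dirty_data_max_max_percent;

	return (val < cap ? val : cap);
}
#endif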

/*
 * If there is at least this much dirty data, push out a txg.
 */
unsigned long zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
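
/*
 * Worked example (not part of the original source): the default above is
 * 1,000,000,000 / 2000 = 500,000, i.e. smoothest for a pool sustaining
 * roughly 2000 operations per second. The sketch below evaluates the delay
 * curve as described in the comment above dmu_tx_delay(), where the delay
 * grows without bound as dirty data approaches zfs_dirty_data_max; treat
 * the exact formula as an assumption documented there, not as this file's
 * code.
 */
#if 0
static uint64_t
example_tx_delay_ns(uint64_t dirty, uint64_t max)
{
	uint64_t min = max * zfs_delay_min_dirty_percent / 100;

	if (dirty <= min)
		return (0);
	/* approaches infinity as dirty approaches max */
	return (zfs_delay_scale * (dirty - min) / (max - dirty));
}
#endif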

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea is to try reasonably hard to ensure there will already be a
 * taskq entry pre-allocated by the time that it is needed by
 * zil_clean(). This way, we can avoid the possibility of an on-demand
 * allocation of a new taskq entry failing, which would result in
 * zil_itxg_clean() being called synchronously from zil_clean() (which
 * can adversely affect the performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
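
/*
 * Illustrative sketch (not part of the original source) of the
 * dispatch-or-fall-back pattern described above, roughly what zil_clean()
 * does: try a TQ_NOSLEEP dispatch, and if no taskq entry is available,
 * run the work synchronously in the caller (i.e. in spa_sync() context).
 * The helper name is hypothetical.
 */
#if 0
static void
example_dispatch_or_inline(taskq_t *tq, task_func_t *func, void *arg)
{
	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) == TASKQID_INVALID)
		func(arg);	/* fallback: runs synchronously */
}
#endif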

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
#ifdef _KERNEL
	{
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		os = dmu_objset_create_impl(dp->dp_spa, ds,
		    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		zfs_create_fs(os, kcred, zplprops, tx);
	}
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

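/*
 * Illustrative sketch (not part of the original source): the waiter side
 * that pairs with the cv_signal() above, roughly what dmu_tx_wait() does.
 * Because dsl_pool_dirty_delta() signals on every change while the pool is
 * under the limit, each woken writer's next call into dsl_pool_dirty_delta()
 * passes the wakeup along to the next waiter.
 */
#if 0
	mutex_enter(&dp->dp_lock);
	while (dp->dp_dirty_total >= zfs_dirty_data_max)
		cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
	mutex_exit(&dp->dp_lock);
#endif
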
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(); also
	 * rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 * - move dead blocks from the pending deadlist to the on-disk deadlist
	 * - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread, a member of the
 * dp_sync_taskq, or if we are being called from SPA context during pool
 * initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = spa_get_slop_space(dp->dp_spa);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 *	dsl_pool_hold()
 *	dsl_dataset_hold()
 *	... perform checks ...
 *	dsl_dataset_long_hold()
 *	dsl_pool_rele()
 *	... perform long-running task ...
 *	dsl_dataset_long_rele()
 *	dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */

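/*
 * Illustrative sketch (not part of the original source): the long-hold
 * formula from the comment above, written out with error handling. The
 * helper name is hypothetical, and the checks and long-running work are
 * placeholders.
 */
#if 0
static int
example_long_hold(const char *name, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(name, tag, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold(dp, name, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}

	/* ... perform checks ... */

	dsl_dataset_long_hold(ds, tag);
	/* config lock dropped; "zfs destroy" now fails with EBUSY */
	dsl_pool_rele(dp, tag);

	/* ... perform long-running task ... */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif
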
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't.)
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");

module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
	"max percent of CPUs that are used to sync dirty data");

module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
	"max percent of CPUs that are used per dp_sync_taskq");

module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
	"number of taskq entries that are pre-populated");

module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
	"max number of taskq entries that are cached");

/* END CSTYLED */
#endif