/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_zfs.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */
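
/*
 * Worked example (illustrative values only): with zfs_dirty_data_max set
 * to 4 GiB and the defaults below, a txg sync is ensured once dirty data
 * reaches 20% (~819 MiB, zfs_dirty_data_sync_percent), dmu_tx_delay()
 * begins delaying writers at 60% (~2.4 GiB, zfs_delay_min_dirty_percent),
 * and new writes block outright once the full 4 GiB limit is exceeded.
 */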

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;

/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
int zfs_dirty_data_sync_percent = 20;

/*
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
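
/*
 * For example, the default of 1,000,000,000 / 2000 = 500,000 targets a
 * backend capable of roughly 2000 operations per second; per the note
 * above, the delay then behaves smoothly from about 200 to 20,000 ops/sec.
 */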

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;
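
/*
 * Since dp_sync_taskq is created with TASKQ_THREADS_CPU_PCT (see
 * dsl_pool_open_impl() below), this value is interpreted as a percentage
 * of online CPUs rather than an absolute thread count; e.g. 75 gives the
 * taskq roughly 3/4 of the CPUs.
 */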

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea is that we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
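
/*
 * For reference, the dispatch-with-fallback pattern described above looks
 * roughly like the following sketch (see zil_clean() in zil.c for the
 * actual code):
 *
 *	if (taskq_dispatch(dp->dp_zil_clean_taskq,
 *	    (task_func_t *)zil_itxg_clean, itxs, TQ_NOSLEEP) ==
 *	    TASKQID_INVALID)
 *		zil_itxg_clean(itxs);
 */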

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_zrele_taskq = taskq_create("z_zrele", 100, defclsyspri,
	    boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
	    TASKQ_THREADS_CPU_PCT);
	dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
	    100, defclsyspri, boot_ncpus, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_unlinked_drain_taskq);
	taskq_destroy(dp->dp_zrele_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
	objset_t *os;
#else
	objset_t *os __attribute__((unused));
#endif
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
#endif

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist and livelists
	 *    to the on-disk versions
	 *  - release hold from dsl_dataset_dirty()
	 *  - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (dmu_objset_is_dirty(mos, txg)) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * We have written all of the accounted dirty data, so our
	 * per-txg dirty-space accounting (dp_dirty_pertxg[]) should now be
	 * zero. However, some seldom-used code paths do not adhere to this
	 * (e.g. dbuf_undirty()). Shore up the accounting of any dirtied
	 * space now.
	 *
	 * Note that, besides any dirty data from datasets, the amount of
	 * dirty data in the MOS is also accounted by the pool. Therefore,
	 * we want to do this cleanup after dsl_pool_sync_mos() so we don't
	 * attempt to update the accounting for the same dirty data twice.
	 * (i.e. at this point we only update the accounting for the space
	 * that we know that we "leaked").
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}

uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
	return (quota);
}
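
/*
 * Illustrative numbers for the slop policies above: if
 * spa_get_slop_space() returns 3.2 GiB, ZFS_SPACE_CHECK_NORMAL reserves
 * all of it, RESERVED halves it (resv >>= 1, 1.6 GiB), EXTRA_RESERVED
 * quarters it (resv >>= 2, 0.8 GiB), and NONE reserves nothing.
 */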

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	uint64_t dirty;

	mutex_enter(&dp->dp_lock);
	dirty = dp->dp_dirty_total;
	mutex_exit(&dp->dp_lock);
	if (dirty > dirty_min_bytes)
		txg_kick(dp);
	return (dirty > delay_min_bytes);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_zrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_zrele_taskq);
}

taskq_t *
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
{
	return (dp->dp_unlinked_drain_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	kmem_strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}
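
/*
 * For example (hypothetical values), holding dataset object 0x1a with the
 * tag "send_hold" adds a ZAP entry named "1a-send_hold" to
 * dp_tmp_userrefs_obj whose 8-byte value is the hold's creation time;
 * releasing it removes that same entry. This is the name format that
 * dsl_pool_clean_tmp_userrefs() splits apart at the '-' above.
 */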

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */
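
/*
 * A minimal sketch of the read-only pattern described above (the dataset
 * name and error handling are illustrative only):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *
 *	if (dsl_pool_hold("tank/fs", FTAG, &dp) != 0)
 *		return;
 *	if (dsl_dataset_hold(dp, "tank/fs", FTAG, &ds) == 0) {
 *		... read what is needed from ds ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *	dsl_pool_rele(dp, FTAG);
 */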

int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD,
	"Max percent of RAM allowed to be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD,
	"zfs_dirty_data_max upper bound as % of RAM");

ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW,
	"Transaction delay threshold");

ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
	"Determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
	"zfs_dirty_data_max upper bound in bytes");

ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
	"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
	"How quickly delay approaches infinity");

ZFS_MODULE_PARAM(zfs, zfs_, sync_taskq_batch_pct, INT, ZMOD_RW,
	"Max percent of CPUs that are used to sync dirty data");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
	"Max percent of CPUs that are used per dp_zil_clean_taskq");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
	"Number of taskq entries that are pre-populated");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
	"Max number of taskq entries that are cached");
/* END CSTYLED */