/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example,
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and the current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
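
/*
 * Illustrative arithmetic (a sketch, not a statement of the exact
 * arc_init() logic): on a system with 16 GiB of RAM and the defaults
 * above, zfs_dirty_data_max would come out to 16 GiB * 10% = ~1.6 GiB,
 * bounded above by zfs_dirty_data_max_max (16 GiB * 25% = 4 GiB here).
 * Setting the zfs_dirty_data_max module parameter directly overrides
 * the percentage-based default entirely.
 */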

/*
 * If there is at least this much dirty data, push out a txg.
 */
unsigned long zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once dirty data reaches this percentage of zfs_dirty_data_max,
 * dmu_tx_delay() will kick in and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
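
/*
 * Worked example for the formula above (illustrative only): the default
 * of 1000000000 / 2000 = 500000 corresponds to a backend that sustains
 * roughly 2000 operations per second; a pool expected to sustain about
 * 20000 ops/sec would instead suggest 1000000000 / 20000 = 50000.
 */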

hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total <= zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_dirty_pertxg[] should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(), also
	 * rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk
	 *    deadlist
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		ASSERTV(objset_t *os = ds->ds_objset);
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the MOS while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}
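
/*
 * Worked example (illustrative arithmetic only): with 1 TiB of dspace,
 * resv = MAX(1 TiB >> 6, 32 MiB) = 16 GiB, so the adjusted size is
 * 1008 GiB; with netfree set, resv is halved to 8 GiB. SPA_MINDEVSIZE
 * is 64 MiB, hence the "at least 32MB" in the comment above.
 */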

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}
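
/*
 * Worked example (illustrative arithmetic only): with zfs_dirty_data_max
 * set to 1 GiB and the defaults above, a txg is kicked once dp_dirty_total
 * exceeds zfs_dirty_data_sync (64 MiB), and this function starts returning
 * B_TRUE once dp_dirty_total exceeds 60% of 1 GiB, i.e. ~614 MiB.
 */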

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT0(prev->ds_phys->ds_bp.blk_birth);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT3U(ds->ds_dir->dd_phys->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(ds->ds_phys->ds_prev_snap_obj, ==, prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}
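
/*
 * For example (hypothetical tag, illustrative only): holding dataset
 * object 0x1234 with the tag "recv_clone" adds the ZAP entry
 * "1234-recv_clone" to the DMU_POOL_TMP_USERREFS object, with the hold's
 * creation time as its value. dsl_pool_clean_tmp_userrefs() later splits
 * these names back apart at the first '-'.
 */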

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */
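
/*
 * A minimal sketch of the read-only pattern described above (hypothetical
 * caller, error handling elided):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *
 *	VERIFY0(dsl_pool_hold("tank/fs", FTAG, &dp));
 *	VERIFY0(dsl_dataset_hold(dp, "tank/fs", FTAG, &ds));
 *	... read what is needed from *ds ...
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 */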

int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");
#endif