/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */
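
/*
 * Illustrative sketch (not part of the original code): the delay mentioned
 * above grows hyperbolically as dirty data approaches zfs_dirty_data_max.
 * This is a simplified restatement of the curve computed in dmu_tx_delay();
 * see the comment there for the authoritative derivation. Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static hrtime_t
example_tx_delay(uint64_t dirty)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;

	if (dirty <= delay_min_bytes)
		return (0);	/* below the threshold: no delay at all */

	/*
	 * The delay rises smoothly from zero and approaches infinity as
	 * dirty approaches zfs_dirty_data_max.
	 */
	return (zfs_delay_scale * (dirty - delay_min_bytes) /
	    (zfs_dirty_data_max - dirty));
}
#endif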

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
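
/*
 * Illustrative sketch (not part of the original code): at module load,
 * arc_init() derives the defaults for the limits above roughly as follows.
 * "physmem" and "PAGESIZE" stand in for the platform's memory accounting;
 * the exact expressions live in arc.c. Kept under "#if 0" so it is never
 * compiled.
 */
#if 0
	/* hard cap: zfs_dirty_data_max_max_percent (25%) of all memory */
	zfs_dirty_data_max_max = physmem * PAGESIZE *
	    zfs_dirty_data_max_max_percent / 100;

	/* default limit: zfs_dirty_data_max_percent (10%) of all memory */
	zfs_dirty_data_max = physmem * PAGESIZE *
	    zfs_dirty_data_max_percent / 100;

	/* never allow the default to exceed the hard cap */
	zfs_dirty_data_max = MIN(zfs_dirty_data_max, zfs_dirty_data_max_max);
#endif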

/*
 * If there is at least this much dirty data, push out a txg.
 */
unsigned long zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once there is this amount of dirty data, the dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
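
/*
 * Worked example (not part of the original code): the default above targets
 * roughly 2000 operations per second, i.e. 1,000,000,000 / 2000 = 500,000.
 * Per the note above, that one setting smoothly handles workloads between
 * about 200 and 20,000 operations per second.
 */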

hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total <= zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * per-txg dirty space accounting (dp_dirty_pertxg[]) should now be
	 * zero. However, some seldom-used code paths do not adhere to this
	 * (e.g. dbuf_undirty(), and rounding error in dbuf_write_physdone()).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk
	 *    deadlist
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		ASSERTV(objset_t *os = ds->ds_objset);
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}
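
/*
 * Worked example (not part of the original code): on a pool with 1 TiB of
 * deflated space, resv = MAX(1 TiB / 64, 32 MiB) = 16 GiB, so the adjusted
 * size is 1008 GiB; with netfree set, the reservation drops to 8 GiB so
 * that frees can still make progress on a nearly full pool.
 */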

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT0(prev->ds_phys->ds_bp.blk_birth);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT3U(ds->ds_dir->dd_phys->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(ds->ds_phys->ds_prev_snap_obj, ==, prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

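/*
 * Example (illustrative, not in the original code): a temporary hold on
 * dataset object 0x36 with the hypothetical tag "mytag" is stored above as
 * the zap entry "36-mytag", whose value is the hold's creation time;
 * dsl_pool_clean_tmp_userrefs() splits such names at the first '-' to
 * recover the dataset object and tag.
 */
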
/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote" and zil_suspend()).
 *
 * The usual formula for long-holding would be (sketched in code below):
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */

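/*
 * Illustrative sketch (not part of the original code) of the long-hold
 * formula described above: hold the pool, hold and long-hold the dataset,
 * then drop the pool config lock for the duration of the long-running work.
 * "do_long_running_task" is a hypothetical callback. Kept under "#if 0"
 * so it is never compiled.
 */
#if 0
static int
example_long_hold(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, tag, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}

	/* ... perform checks ... */

	dsl_dataset_long_hold(ds, tag);
	dsl_pool_rele(dp, tag);		/* config lock may now be dropped */

	do_long_running_task(ds);	/* "zfs destroy" now fails w/ EBUSY */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif
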
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent,
	"percent of RAM that can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");
#endif