/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>

int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;		/* 1/8th of physical memory */
int zfs_txg_synctime_ms = 1000;		/* target millisecs to sync a txg */

unsigned long zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
unsigned long zfs_write_limit_max = 0;		/* max data payload per txg */
unsigned long zfs_write_limit_inflated = 0;
unsigned long zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;

hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	/*
	 * We need to copy dp_space_towrite() before doing
	 * dsl_sync_task_sync(), because
	 * dsl_dataset_snapshot_reserve_space() will increase
	 * dp_space_towrite but not actually write anything.
	 */
	data_written = dp->dp_space_towrite[txg & TXG_MASK];

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();

	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);
	err = zio_wait(zio);

	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds;
	    ds = list_next(&synced_datasets, ds))
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	err = zio_wait(zio);

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk
	 *    deadlist
	 *  - clean up zil records
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets))) {
		ASSERTV(objset_t *os = ds->ds_objset);
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}

	start = gethrtime();
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir).  We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	start = gethrtime();
	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mos, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	DTRACE_PROBE(pool_sync__3task);
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)))
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
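	/*
	 * Illustrative example (numbers are hypothetical): with 8 GiB of
	 * physical memory and the default zfs_write_limit_shift of 3,
	 * zfs_write_limit_max becomes ptob(physmem) >> 3 = 1 GiB;
	 * spa_get_asize() then inflates that to cover replication and
	 * parity overhead, yielding zfs_write_limit_inflated.
	 */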
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 *	thru = 3/4 old_thru + 1/4 new_thru
	 *
	 * Note: write_time is in nanosecs while dp_throughput is expressed in
	 * bytes per millisecond.
	 */
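	/*
	 * Worked example (hypothetical numbers): if this txg wrote 256 MiB
	 * in 2 seconds, the new sample is 268435456 / 2000 = ~134217
	 * bytes/ms.  With a previous dp_throughput of 100000 bytes/ms, the
	 * smoothed value is 134217 / 4 + 3 * 100000 / 4 = ~108554 bytes/ms,
	 * so the next write limit becomes roughly 108554 * 1000
	 * (zfs_txg_synctime_ms) bytes = ~103 MiB, clamped below between
	 * zfs_write_limit_min and zfs_write_limit_inflated.
	 */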
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 &&
	    write_time > MSEC2NSEC(1)) {
		uint64_t throughput = data_written / NSEC2MSEC(write_time);

		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime_ms));
	}
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;
	dsl_dataset_t *ds;

	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg))) {
		ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
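	/*
	 * Illustrative example: on a pool with 1 TiB of dspace the
	 * reservation is 1 TiB / 64 = 16 GiB, halved to 8 GiB when
	 * assessing a free (netfree).  The 32MB floor
	 * (SPA_MINDEVSIZE >> 1) only matters on very small pools.
	 */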
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit) {
			DMU_TX_STAT_BUMP(dmu_tx_write_limit);
			return (SET_ERROR(ERESTART));
		}
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay the
	 * caller (by zfs_throttle_delay, 10ms by default).  This will
	 * slow down the "fill" rate until the sync process can catch
	 * up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3))) {
		txg_delay(dp, tx->tx_txg, zfs_throttle_delay,
		    zfs_throttle_resolution);
	}

	return (0);
}

void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}

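/*
 * Under memory pressure, pull this pool's write limit down toward one
 * quarter of the space currently reserved or dirty, but never below
 * zfs_write_limit_min.
 */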
void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}

void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}

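/*
 * Upgrade callback: walk back through a head dataset's snapshot chain
 * to its origin snapshot (non-clones are attached to $ORIGIN here), and
 * record the chain's oldest snapshot in the origin's ds_next_clones_obj
 * zap.
 */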
/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT0(prev->ds_phys->ds_bp.blk_birth);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT3U(ds->ds_dir->dd_phys->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(ds->ds_phys->ds_prev_snap_obj, ==, prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}

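/*
 * Upgrade callback: for each dataset that is a clone, add it to its
 * origin's dd_clones zap, creating that zap if necessary.
 */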
/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		uint64_t dsobj;

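		/*
		 * Entries are named "<dsobj in hex>-<tag>" (see
		 * dsl_pool_user_hold_rele_impl() below), e.g. a
		 * hypothetical "1f-recv_tag"; split the name at the
		 * first '-'.
		 */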
		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		dsobj = strtonum(za.za_name, NULL);
		dsl_dataset_user_release_tmp(dp, dsobj, htag);
	}
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting).  It must be held for
 * read to hold a dataset or dsl_dir.  I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}.  In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held.  The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY.  A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail.  This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff".  There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held.  The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed).  Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task().  The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock.  See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */

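/*
 * A minimal sketch of the long-hold formula described above, as it might
 * look in a consumer (illustrative only: the pool/dataset names are
 * hypothetical, and error handling is reduced to the essentials):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *	int error;
 *
 *	error = dsl_pool_hold("tank", FTAG, &dp);
 *	if (error != 0)
 *		return (error);
 *	error = dsl_dataset_hold(dp, "tank/fs", FTAG, &ds);
 *	if (error != 0) {
 *		dsl_pool_rele(dp, FTAG);
 *		return (error);
 *	}
 *	dsl_dataset_long_hold(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 *	... perform long-running task ...
 *	dsl_dataset_long_rele(ds, FTAG);
 *	dsl_dataset_rele(ds, FTAG);
 */
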
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

module_param(zfs_no_write_throttle, int, 0644);
MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling");

module_param(zfs_write_limit_shift, int, 0444);
MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");

module_param(zfs_txg_synctime_ms, int, 0644);
MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync");

module_param(zfs_write_limit_min, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit");

module_param(zfs_write_limit_max, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_max, "Max txg write limit");

module_param(zfs_write_limit_inflated, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated txg write limit");

module_param(zfs_write_limit_override, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_override, "Override txg write limit");
#endif