/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
int zfs_txg_synctime_ms = 1000;			/* target millisecs to sync a txg */

unsigned long zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
unsigned long zfs_write_limit_max = 0;		/* max data payload per txg */
unsigned long zfs_write_limit_inflated = 0;
unsigned long zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;
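/*
 * Illustrative arithmetic (example numbers, not from the original source):
 * on a machine with 8 GiB of physical memory and the default
 * zfs_write_limit_shift of 3, dsl_pool_sync() computes
 *	zfs_write_limit_max = ptob(physmem) >> 3 = 1 GiB
 * and then inflates that via spa_get_asize() to account for replication.
 * Setting zfs_write_limit_override to a nonzero value bypasses this
 * calculation entirely.
 */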
static int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));
	list_create(&dp->dp_synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}
int
dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err)
		goto out;

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_close(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, txg);

out:
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}
void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_close(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);
	list_destroy(&dp->dp_synced_datasets);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT3U(err, ==, 0);

	/* Initialize scan structures */
	VERIFY3U(0, ==, dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY(0 == dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}
static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_sync_task_group_t *dstg;
	objset_t *mos = dp->dp_meta_objset;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;

	/*
	 * We need to copy dp_space_towrite() before doing
	 * dsl_sync_task_group_sync(), because
	 * dsl_dataset_snapshot_reserve_space() will increase
	 * dp_space_towrite but not actually write anything.
	 */
	data_written = dp->dp_space_towrite[txg & TXG_MASK];

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();

	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&dp->dp_synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);
	err = zio_wait(zio);

	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	for (ds = list_head(&dp->dp_synced_datasets); ds;
	    ds = list_next(&dp->dp_synced_datasets, ds))
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, because that could cause a snapshot of a dataset
	 * whose ds_bp will be rewritten when we do this 2nd sync.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	err = zio_wait(zio);

	/*
	 * Move dead blocks from the pending deadlist to the on-disk
	 * deadlist.
	 */
	for (ds = list_head(&dp->dp_synced_datasets); ds;
	    ds = list_next(&dp->dp_synced_datasets, ds)) {
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
	}

	while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg))) {
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
		dsl_sync_task_group_sync(dstg, tx);
	}
	DTRACE_PROBE(pool_sync__3task);

	start = gethrtime();
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	start = gethrtime();
	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mos, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	dmu_tx_commit(tx);

	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 *	thru = 3/4 old_thru + 1/4 new_thru
	 *
	 * Note: write_time is in nanosecs, so write_time/MICROSEC
	 * yields millisecs
	 */
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > MICROSEC) {
		uint64_t throughput = data_written / (write_time / MICROSEC);

		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime_ms));
	}
}
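/*
 * Worked example of the write-limit adjustment above (illustrative
 * numbers, not from the original source): if a txg wrote 512 MiB in
 * 2,000 ms, throughput is ~256 KiB/ms.  With the default
 * zfs_txg_synctime_ms of 1000 the new dp_write_limit target is
 * ~256 MiB, clamped to [zfs_write_limit_min, zfs_write_limit_inflated].
 * Successive txgs blend into dp_throughput with a 3/4 : 1/4 weighting,
 * so a single slow or fast txg moves the limit only gradually.
 */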
void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	dsl_dataset_t *ds;
	objset_t *os;

	while ((ds = list_head(&dp->dp_synced_datasets))) {
		list_remove(&dp->dp_synced_datasets, ds);
		os = ds->ds_objset;
		zil_clean(os->os_zil, txg);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}
/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_get_dsl(dp->dp_spa) == NULL);
}
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}
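/*
 * Illustrative numbers (not from the original source): for a pool with
 * 1 TiB of dspace, the reservation is MAX(1 TiB >> 6, SPA_MINDEVSIZE >> 1)
 * = 16 GiB, so callers may allocate up to ~1008 GiB; with netfree set
 * (e.g. when assessing whether a free can proceed) the reservation is
 * halved to 8 GiB.
 */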
int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit) {
			DMU_TX_STAT_BUMP(dmu_tx_write_limit);
			return (ERESTART);
		}
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}
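/*
 * Illustrative behavior of the throttle above (example numbers, not
 * from the original source): with an effective write_limit of 256 MiB,
 * a reservation that pushes the estimate past 224 MiB (7/8ths) delays
 * the caller one clock tick, and one that exceeds 256 MiB fails with
 * ERESTART so the caller backs off and retries in a later txg.
 */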
void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}
void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}
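/*
 * Example (illustrative numbers, not from the original source): if
 * memory pressure is signalled while ~100 MiB is dirty or reserved
 * across all txgs, the limit becomes MAX(zfs_write_limit_min,
 * MIN(dp_write_limit, 25 MiB)); pressure can only lower the limit,
 * never raise it, and never below zfs_write_limit_min.
 */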
void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}
/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}
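/*
 * Example of the walk above (illustrative): the loop follows a
 * dataset's ds_prev_snap_obj chain oldest-ward; when it reaches a
 * snapshot whose ds_next_snap_obj does not point back into this chain,
 * that snapshot belongs to another filesystem and is the clone's true
 * origin.  A dataset whose chain ends with ds_prev_snap_obj == 0 has no
 * branch point and is parented to the pool-wide $ORIGIN instead.
 */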
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}
/* ARGSUSED */
static int
upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (ds->ds_dir->dd_phys->dd_origin_obj) {
		dsl_dataset_t *origin;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));

		dsl_dataset_rele(origin, FTAG);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY3U(0, ==, zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
	    dp->dp_meta_objset, obj));

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}
void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}
taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}
/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	uint64_t dsobj;
	char *htag;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		dsobj = strtonum(za.za_name, NULL);
		(void) dsl_dataset_user_release_tmp(dp, dsobj, htag, B_FALSE);
	}
	zap_cursor_fini(&zc);
}
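/*
 * Hold names in this zap are formatted by dsl_pool_user_hold_rele_impl()
 * below as "%llx-%s" (dataset object number, then tag); e.g. a hold with
 * tag "recv" on dataset object 0x1234 is stored as "1234-recv", which the
 * loop above splits at the first '-' back into dsobj and htag.
 */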
/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create(mos, DMU_OT_USERREFS,
	    DMU_OT_NONE, 0, tx);

	VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS,
	    sizeof (uint64_t), 1, &dp->dp_tmp_userrefs_obj, tx) == 0);
}
static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t *now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (ENOENT);
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}
/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t *now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}
/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
	    tx, B_FALSE));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_no_write_throttle, int, 0644);
MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling");

module_param(zfs_write_limit_shift, int, 0444);
MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");

module_param(zfs_txg_synctime_ms, int, 0644);
MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync");

module_param(zfs_write_limit_min, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit");

module_param(zfs_write_limit_max, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_max, "Max txg write limit");

module_param(zfs_write_limit_inflated, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated txg write limit");

module_param(zfs_write_limit_override, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_override, "Override txg write limit");
#endif