/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
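
/*
 * Usage sketch (illustrative): SWITCH64(x, y) swaps two uint64_t values in
 * place; process_old_deadlist() below uses it to exchange deadlist object
 * numbers between a snapshot and the next dataset.  dsl_reaper is a sentinel
 * ds_owner value marking a dataset as mid-destruction, which is what
 * DSL_DATASET_IS_DESTROYED() tests.
 */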
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
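/*
 * Worked example (hypothetical values): with ds_reserved = 10M and
 * ds_unique_bytes = 4M, a delta of +2M gives old_bytes = MAX(4M, 10M) = 10M
 * and new_bytes = MAX(6M, 10M) = 10M, so 0 bytes propagate to the parent;
 * the growth is still absorbed by the refreservation.
 */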
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used, compressed, uncompressed;
	int64_t delta;

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	int used, compressed, uncompressed;

	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	ASSERT(ds != NULL);
	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}
boolean_t
dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
    uint64_t blk_birth)
{
	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
		return (B_FALSE);

	ddt_prefetch(dsl_dataset_get_spa(ds), bp);

	return (B_TRUE);
}
/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);
	return (err);
}
static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_type != DMU_OT_DSL_DATASET)
		return (EINVAL);

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner = NULL;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;
		list_link_init(&ds->ds_synced_link);

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		bplist_create(&ds->ds_pending_deadlist);
		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);

		err = dsl_dir_open_obj(dp,
		    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		if (err) {
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    ds->ds_phys->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0)
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);

		if (err || winner) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock. And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the ds_lock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
int
dsl_dataset_own(const char *name, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}
int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = tag;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
struct destroyarg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *failed;
	boolean_t defer;
};

static int
dsl_snapshot_destroy_one(const char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	char *dsname;
	int err;

	dsname = kmem_asprintf("%s@%s", name, da->snapname);
	err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
	strfree(dsname);
	if (err == 0) {
		struct dsl_ds_destroyarg *dsda;

		dsl_dataset_make_exclusive(ds, da->dstg);
		dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
		dsda->ds = ds;
		dsda->defer = da->defer;
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, dsda, da->dstg, 0);
	} else if (err == ENOENT) {
		err = 0;
	} else {
		(void) strcpy(da->failed, name);
	}
	return (err);
}
/*
 * Destroy 'snapname' in all descendants of 'fsname'.
 */
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
int
dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;
	da.defer = defer;

	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, da.dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}
static boolean_t
dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
{
	boolean_t might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(ds))
		might_destroy = B_TRUE;
	mutex_exit(&ds->ds_lock);

	return (might_destroy);
}
/*
 * If we're removing a clone, and these three conditions are true:
 *	1) the clone's origin has no other children
 *	2) the clone's origin has no user references
 *	3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 */
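/*
 * Note: these are the same three conditions tested by
 * dsl_dataset_might_destroy_origin() above (ds_num_children == 2,
 * ds_userrefs == 0, DS_IS_DEFER_DESTROY()).
 */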
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);

		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}
	return (0);
}
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t *dummy_ds;

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
	dummy_ds->ds_dir = dd;
	dummy_ds->ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out_free;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out_free;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}
	if (err != ESRCH)
		goto out_free;

	/*
	 * Only the ZIL knows how to free log blocks.
	 */
	zil_destroy(dmu_objset_zil(os), B_FALSE);

	/*
	 * Sync out all in-flight IO.
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		ASSERTV(uint64_t count);

		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out_free;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out_free;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);

out_free:
	kmem_free(dummy_ds, sizeof (dsl_dataset_t));
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}
void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
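/*
 * In symbols: unique = used - (mrs_used - dlused), where mrs_used is the
 * space referenced by the most recent snapshot and dlused is the space on
 * this dataset's deadlist, i.e. already freed since that snapshot.
 */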
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!dsl_dataset_is_snapshot(ds));

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}
static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}
/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point. However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}
struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int err;
	ASSERTV(uint64_t count);

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
, void *tag
, dmu_tx_t
*tx
)
1626 struct dsl_ds_destroyarg
*dsda
= arg1
;
1627 dsl_dataset_t
*ds
= dsda
->ds
;
1629 int after_branch_point
= FALSE
;
1630 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1631 objset_t
*mos
= dp
->dp_meta_objset
;
1632 dsl_dataset_t
*ds_prev
= NULL
;
1633 boolean_t wont_destroy
;
1636 wont_destroy
= (dsda
->defer
&&
1637 (ds
->ds_userrefs
> 0 || ds
->ds_phys
->ds_num_children
> 1));
1639 ASSERT(ds
->ds_owner
|| wont_destroy
);
1640 ASSERT(dsda
->defer
|| ds
->ds_phys
->ds_num_children
<= 1);
1641 ASSERT(ds
->ds_prev
== NULL
||
1642 ds
->ds_prev
->ds_phys
->ds_next_snap_obj
!= ds
->ds_object
);
1643 ASSERT3U(ds
->ds_phys
->ds_bp
.blk_birth
, <=, tx
->tx_txg
);
1646 ASSERT(spa_version(dp
->dp_spa
) >= SPA_VERSION_USERREFS
);
1647 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
1648 ds
->ds_phys
->ds_flags
|= DS_FLAG_DEFER_DESTROY
;
1652 /* signal any waiters that this dataset is going away */
1653 mutex_enter(&ds
->ds_lock
);
1654 ds
->ds_owner
= dsl_reaper
;
1655 cv_broadcast(&ds
->ds_exclusive_cv
);
1656 mutex_exit(&ds
->ds_lock
);
1658 /* Remove our reservation */
1659 if (ds
->ds_reserved
!= 0) {
1660 dsl_prop_setarg_t psa
;
1663 dsl_prop_setarg_init_uint64(&psa
, "refreservation",
1664 (ZPROP_SRC_NONE
| ZPROP_SRC_LOCAL
| ZPROP_SRC_RECEIVED
),
1666 psa
.psa_effective_value
= 0; /* predict default value */
1668 dsl_dataset_set_reservation_sync(ds
, &psa
, tx
);
1669 ASSERT3U(ds
->ds_reserved
, ==, 0);
1672 ASSERT(RW_WRITE_HELD(&dp
->dp_config_rwlock
));
1674 dsl_scan_ds_destroyed(ds
, tx
);
1676 obj
= ds
->ds_object
;
1678 if (ds
->ds_phys
->ds_prev_snap_obj
!= 0) {
1680 ds_prev
= ds
->ds_prev
;
1682 VERIFY(0 == dsl_dataset_hold_obj(dp
,
1683 ds
->ds_phys
->ds_prev_snap_obj
, FTAG
, &ds_prev
));
1685 after_branch_point
=
1686 (ds_prev
->ds_phys
->ds_next_snap_obj
!= obj
);
1688 dmu_buf_will_dirty(ds_prev
->ds_dbuf
, tx
);
1689 if (after_branch_point
&&
1690 ds_prev
->ds_phys
->ds_next_clones_obj
!= 0) {
1691 remove_from_next_clones(ds_prev
, obj
, tx
);
1692 if (ds
->ds_phys
->ds_next_snap_obj
!= 0) {
1693 VERIFY(0 == zap_add_int(mos
,
1694 ds_prev
->ds_phys
->ds_next_clones_obj
,
1695 ds
->ds_phys
->ds_next_snap_obj
, tx
));
1698 if (after_branch_point
&&
1699 ds
->ds_phys
->ds_next_snap_obj
== 0) {
1700 /* This clone is toast. */
1701 ASSERT(ds_prev
->ds_phys
->ds_num_children
> 1);
1702 ds_prev
->ds_phys
->ds_num_children
--;
1705 * If the clone's origin has no other clones, no
1706 * user holds, and has been marked for deferred
1707 * deletion, then we should have done the necessary
1708 * destroy setup for it.
1710 if (ds_prev
->ds_phys
->ds_num_children
== 1 &&
1711 ds_prev
->ds_userrefs
== 0 &&
1712 DS_IS_DEFER_DESTROY(ds_prev
)) {
1713 ASSERT3P(dsda
->rm_origin
, !=, NULL
);
1715 ASSERT3P(dsda
->rm_origin
, ==, NULL
);
1717 } else if (!after_branch_point
) {
1718 ds_prev
->ds_phys
->ds_next_snap_obj
=
1719 ds
->ds_phys
->ds_next_snap_obj
;
1723 if (dsl_dataset_is_snapshot(ds
)) {
1724 dsl_dataset_t
*ds_next
;
1725 uint64_t old_unique
;
1726 uint64_t used
= 0, comp
= 0, uncomp
= 0;
1728 VERIFY(0 == dsl_dataset_hold_obj(dp
,
1729 ds
->ds_phys
->ds_next_snap_obj
, FTAG
, &ds_next
));
1730 ASSERT3U(ds_next
->ds_phys
->ds_prev_snap_obj
, ==, obj
);
1732 old_unique
= ds_next
->ds_phys
->ds_unique_bytes
;
1734 dmu_buf_will_dirty(ds_next
->ds_dbuf
, tx
);
1735 ds_next
->ds_phys
->ds_prev_snap_obj
=
1736 ds
->ds_phys
->ds_prev_snap_obj
;
1737 ds_next
->ds_phys
->ds_prev_snap_txg
=
1738 ds
->ds_phys
->ds_prev_snap_txg
;
1739 ASSERT3U(ds
->ds_phys
->ds_prev_snap_txg
, ==,
1740 ds_prev
? ds_prev
->ds_phys
->ds_creation_txg
: 0);
1743 if (ds_next
->ds_deadlist
.dl_oldfmt
) {
1744 process_old_deadlist(ds
, ds_prev
, ds_next
,
1745 after_branch_point
, tx
);
1747 /* Adjust prev's unique space. */
1748 if (ds_prev
&& !after_branch_point
) {
1749 dsl_deadlist_space_range(&ds_next
->ds_deadlist
,
1750 ds_prev
->ds_phys
->ds_prev_snap_txg
,
1751 ds
->ds_phys
->ds_prev_snap_txg
,
1752 &used
, &comp
, &uncomp
);
1753 ds_prev
->ds_phys
->ds_unique_bytes
+= used
;
1756 /* Adjust snapused. */
1757 dsl_deadlist_space_range(&ds_next
->ds_deadlist
,
1758 ds
->ds_phys
->ds_prev_snap_txg
, UINT64_MAX
,
1759 &used
, &comp
, &uncomp
);
1760 dsl_dir_diduse_space(ds
->ds_dir
, DD_USED_SNAP
,
1761 -used
, -comp
, -uncomp
, tx
);
1763 /* Move blocks to be freed to pool's free list. */
1764 dsl_deadlist_move_bpobj(&ds_next
->ds_deadlist
,
1765 &dp
->dp_free_bpobj
, ds
->ds_phys
->ds_prev_snap_txg
,
1767 dsl_dir_diduse_space(tx
->tx_pool
->dp_free_dir
,
1768 DD_USED_HEAD
, used
, comp
, uncomp
, tx
);
1769 dsl_dir_dirty(tx
->tx_pool
->dp_free_dir
, tx
);
1771 /* Merge our deadlist into next's and free it. */
1772 dsl_deadlist_merge(&ds_next
->ds_deadlist
,
1773 ds
->ds_phys
->ds_deadlist_obj
, tx
);
1775 dsl_deadlist_close(&ds
->ds_deadlist
);
1776 dsl_deadlist_free(mos
, ds
->ds_phys
->ds_deadlist_obj
, tx
);
1778 /* Collapse range in clone heads */
1779 dsl_dataset_remove_clones_key(ds
,
1780 ds
->ds_phys
->ds_creation_txg
, tx
);
1782 if (dsl_dataset_is_snapshot(ds_next
)) {
1783 dsl_dataset_t
*ds_nextnext
;
1787 * Update next's unique to include blocks which
1788 * were previously shared by only this snapshot
1789 * and it. Those blocks will be born after the
1790 * prev snap and before this snap, and will have
1791 * died after the next snap and before the one
1792 * after that (ie. be on the snap after next's
1795 VERIFY(0 == dsl_dataset_hold_obj(dp
,
1796 ds_next
->ds_phys
->ds_next_snap_obj
,
1797 FTAG
, &ds_nextnext
));
1798 dsl_deadlist_space_range(&ds_nextnext
->ds_deadlist
,
1799 ds
->ds_phys
->ds_prev_snap_txg
,
1800 ds
->ds_phys
->ds_creation_txg
,
1801 &used
, &comp
, &uncomp
);
1802 ds_next
->ds_phys
->ds_unique_bytes
+= used
;
1803 dsl_dataset_rele(ds_nextnext
, FTAG
);
1804 ASSERT3P(ds_next
->ds_prev
, ==, NULL
);
1806 /* Collapse range in this head. */
1807 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp
,
1808 ds
->ds_dir
->dd_phys
->dd_head_dataset_obj
,
1810 dsl_deadlist_remove_key(&hds
->ds_deadlist
,
1811 ds
->ds_phys
->ds_creation_txg
, tx
);
1812 dsl_dataset_rele(hds
, FTAG
);
1815 ASSERT3P(ds_next
->ds_prev
, ==, ds
);
1816 dsl_dataset_drop_ref(ds_next
->ds_prev
, ds_next
);
1817 ds_next
->ds_prev
= NULL
;
1819 VERIFY(0 == dsl_dataset_get_ref(dp
,
1820 ds
->ds_phys
->ds_prev_snap_obj
,
1821 ds_next
, &ds_next
->ds_prev
));
1824 dsl_dataset_recalc_head_uniq(ds_next
);
1827 * Reduce the amount of our unconsmed refreservation
1828 * being charged to our parent by the amount of
1829 * new unique data we have gained.
1831 if (old_unique
< ds_next
->ds_reserved
) {
1833 uint64_t new_unique
=
1834 ds_next
->ds_phys
->ds_unique_bytes
;
1836 ASSERT(old_unique
<= new_unique
);
1837 mrsdelta
= MIN(new_unique
- old_unique
,
1838 ds_next
->ds_reserved
- old_unique
);
1839 dsl_dir_diduse_space(ds
->ds_dir
,
1840 DD_USED_REFRSRV
, -mrsdelta
, 0, 0, tx
);
1843 dsl_dataset_rele(ds_next
, FTAG
);
1846 * There's no next snapshot, so this is a head dataset.
1847 * Destroy the deadlist. Unless it's a clone, the
1848 * deadlist should be empty. (If it's a clone, it's
1849 * safe to ignore the deadlist contents.)
1853 dsl_deadlist_close(&ds
->ds_deadlist
);
1854 dsl_deadlist_free(mos
, ds
->ds_phys
->ds_deadlist_obj
, tx
);
1855 ds
->ds_phys
->ds_deadlist_obj
= 0;
1858 * Free everything that we point to (that's born after
1859 * the previous snapshot, if we are a clone)
1861 * NB: this should be very quick, because we already
1862 * freed all the objects in open context.
1866 err
= traverse_dataset(ds
, ds
->ds_phys
->ds_prev_snap_txg
,
1867 TRAVERSE_POST
, kill_blkptr
, &ka
);
1868 ASSERT3U(err
, ==, 0);
1869 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds
) ||
1870 ds
->ds_phys
->ds_unique_bytes
== 0);
1872 if (ds
->ds_prev
!= NULL
) {
1873 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_DIR_CLONES
) {
1874 VERIFY3U(0, ==, zap_remove_int(mos
,
1875 ds
->ds_prev
->ds_dir
->dd_phys
->dd_clones
,
1876 ds
->ds_object
, tx
));
1878 dsl_dataset_rele(ds
->ds_prev
, ds
);
1879 ds
->ds_prev
= ds_prev
= NULL
;
1884 * This must be done after the dsl_traverse(), because it will
1885 * re-open the objset.
1887 if (ds
->ds_objset
) {
1888 dmu_objset_evict(ds
->ds_objset
);
1889 ds
->ds_objset
= NULL
;
1892 if (ds
->ds_dir
->dd_phys
->dd_head_dataset_obj
== ds
->ds_object
) {
1893 /* Erase the link in the dir */
1894 dmu_buf_will_dirty(ds
->ds_dir
->dd_dbuf
, tx
);
1895 ds
->ds_dir
->dd_phys
->dd_head_dataset_obj
= 0;
1896 ASSERT(ds
->ds_phys
->ds_snapnames_zapobj
!= 0);
1897 err
= zap_destroy(mos
, ds
->ds_phys
->ds_snapnames_zapobj
, tx
);
1900 /* remove from snapshot namespace */
1901 dsl_dataset_t
*ds_head
;
1902 ASSERT(ds
->ds_phys
->ds_snapnames_zapobj
== 0);
1903 VERIFY(0 == dsl_dataset_hold_obj(dp
,
1904 ds
->ds_dir
->dd_phys
->dd_head_dataset_obj
, FTAG
, &ds_head
));
1905 VERIFY(0 == dsl_dataset_get_snapname(ds
));
1910 err
= dsl_dataset_snap_lookup(ds_head
,
1911 ds
->ds_snapname
, &val
);
1912 ASSERT3U(err
, ==, 0);
1913 ASSERT3U(val
, ==, obj
);
1916 err
= dsl_dataset_snap_remove(ds_head
, ds
->ds_snapname
, tx
);
1918 dsl_dataset_rele(ds_head
, FTAG
);
1921 if (ds_prev
&& ds
->ds_prev
!= ds_prev
)
1922 dsl_dataset_rele(ds_prev
, FTAG
);
1924 spa_prop_clear_bootfs(dp
->dp_spa
, ds
->ds_object
, tx
);
1925 spa_history_log_internal(LOG_DS_DESTROY
, dp
->dp_spa
, tx
,
1926 "dataset = %llu", ds
->ds_object
);
1928 if (ds
->ds_phys
->ds_next_clones_obj
!= 0) {
1929 ASSERTV(uint64_t count
);
1930 ASSERT(0 == zap_count(mos
,
1931 ds
->ds_phys
->ds_next_clones_obj
, &count
) && count
== 0);
1932 VERIFY(0 == dmu_object_free(mos
,
1933 ds
->ds_phys
->ds_next_clones_obj
, tx
));
1935 if (ds
->ds_phys
->ds_props_obj
!= 0)
1936 VERIFY(0 == zap_destroy(mos
, ds
->ds_phys
->ds_props_obj
, tx
));
1937 if (ds
->ds_phys
->ds_userrefs_obj
!= 0)
1938 VERIFY(0 == zap_destroy(mos
, ds
->ds_phys
->ds_userrefs_obj
, tx
));
1939 dsl_dir_close(ds
->ds_dir
, ds
);
1941 dsl_dataset_drain_refs(ds
, tag
);
1942 VERIFY(0 == dmu_object_free(mos
, obj
, tx
));
1944 if (dsda
->rm_origin
) {
1946 * Remove the origin of the clone we just destroyed.
1948 struct dsl_ds_destroyarg ndsda
= {0};
1950 ndsda
.ds
= dsda
->rm_origin
;
1951 dsl_dataset_destroy_sync(&ndsda
, tag
, tx
);
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
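
/*
 * Example for the check above (hypothetical numbers): with
 * refreservation = 10G and ds_unique_bytes = 3G, asize = MIN(3G, 10G) = 3G
 * must be available outside the reservation or ENOSPC is returned.
 */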
int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the dataset's name is not too long.  Name consists
	 * of the dataset's length + 1 for the @-sign + snapshot name's length
	 */
	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
		return (ENAMETOOLONG);

	err = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (err)
		return (err);

	ds->ds_trysnap_txg = tx->tx_txg;
	return (0);
}
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_objset, zio, tx);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs, ratio;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    ds->ds_phys->ds_unique_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    ds->ds_object);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    ds->ds_userrefs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

	ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
	    (ds->ds_phys->ds_uncompressed_bytes * 100 /
	    ds->ds_phys->ds_compressed_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2189 if (ds
->ds_phys
->ds_next_snap_obj
) {
2191 * This is a snapshot; override the dd's space used with
2192 * our unique space and compression ratio.
2194 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_USED
,
2195 ds
->ds_phys
->ds_unique_bytes
);
2196 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_COMPRESSRATIO
, ratio
);
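/*
 * Illustrative refratio arithmetic (numbers are examples only): a
 * dataset referencing 300M of logical (uncompressed) data stored in
 * 100M on disk yields ratio = 300M * 100 / 100M = 300, which the
 * userland tools render as "3.00x".  A dataset with no compressed
 * bytes reports the neutral value 100 ("1.00x").
 */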
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	} else {
		stat->dds_origin[0] = '\0';
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}

uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}

void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = ds->ds_phys->ds_used_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
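/*
 * Example of the refquota clamp above (hypothetical sizes): with
 * refquota=10G, referenced=8G and 50G free in the pool, available
 * is reported as MIN(50G, 10G - 8G) = 2G; once referenced reaches
 * the quota, available drops to 0 even though the pool has space.
 */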
boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
	ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));
	if (ds->ds_prev == NULL)
		return (B_FALSE);
	if (ds->ds_phys->ds_bp.blk_birth >
	    ds->ds_prev->ds_phys->ds_creation_txg) {
		objset_t *os, *os_prev;
		/*
		 * It may be that only the ZIL differs, because it was
		 * reset in the head.  Don't count that as being
		 * modified.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0)
			return (B_TRUE);
		if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
			return (B_TRUE);
		return (bcmp(&os->os_phys->os_meta_dnode,
		    &os_prev->os_phys->os_meta_dnode,
		    sizeof (os->os_phys->os_meta_dnode)) != 0);
	}
	return (B_FALSE);
}

/* ARGSUSED */
static int
dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_dataset_t *hds;
	uint64_t val;
	int err;

	err = dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
	if (err)
		return (err);

	/* new name better not be in use */
	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
	dsl_dataset_rele(hds, FTAG);

	if (err == 0)
		err = EEXIST;
	else if (err == ENOENT)
		err = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
		err = ENAMETOOLONG;

	return (err);
}

static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT3U(err, ==, 0);
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}
struct renamesnaparg {
	dsl_sync_task_group_t *dstg;
	char failed[MAXPATHLEN];
	char *oldsnap;
	char *newsnap;
};

/* ARGSUSED */
static int
dsl_snapshot_rename_one(const char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *snapname;
	int err;

	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
	if (err != 0) {
		strfree(snapname);
		return (err == ENOENT ? 0 : err);
	}

#ifdef _KERNEL
	/*
	 * For all filesystems undergoing rename, we'll need to unmount
	 * the snapshot first.
	 */
	(void) zfs_unmount_snap(snapname, NULL);
#endif
	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
	strfree(snapname);
	if (err != 0)
		return (err == ENOENT ? 0 : err);

	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	return (0);
}
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname) + 1;

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';

	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len);

	if (err == 0)
		err = dsl_sync_task_group_wait(ra->dstg);

	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
			(void) strlcat(ra->failed, ra->newsnap,
			    sizeof (ra->failed));
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	if (err)
		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}

static int
dsl_valid_rename(const char *oldname, void *arg)
{
	int delta = *(int *)arg;

	if (strlen(oldname) + delta >= MAXNAMELEN)
		return (ENAMETOOLONG);

	return (0);
}

#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);

	if (tail == NULL) {
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (err == 0)
			err = dsl_dir_rename(dd, newname);
		dsl_dir_close(dd, FTAG);
		return (err);
	}

	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* new name must be snapshot in same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (recursive) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}
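/*
 * Usage sketch (hypothetical names): a recursive snapshot rename such
 * as "zfs rename -r tank/fs@old tank/fs@new" reaches
 * dsl_dataset_rename(oldname, newname, B_TRUE), which fans out via
 * dsl_recursive_rename()/dmu_objset_find() to every descendant that
 * has an "@old" snapshot, e.g. tank/fs@old and tank/fs/child@old.
 * On failure, oldname is overwritten with the name of the snapshot
 * that could not be renamed so the caller can report it.
 */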
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

struct promotearg {
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin;
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);

static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;
	uint64_t unused;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &pa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	pa->used = origin_ds->ds_phys->ds_used_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			err = EEXIST;
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
out:
	pa->err_ds = snap->ds->ds_snapname;
	return (err);
}
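/*
 * Worked example of the telescoping sum above (made-up numbers): with
 * three snapshots where u2 = 5G (used of the newest shared snapshot)
 * and deadlists k2 = 1G, k1 = 2G, k0 = 0, the space transferred is
 * u2 + k2 + k1 + k0 = 8G.  If the walk stopped one short of the
 * ORIGIN (M = 1), the partial form u2 + k2 + k1 - u0 would apply
 * instead, which is why pa->origin_origin's usage is subtracted for
 * clones of clones.
 */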
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    origin_ds->ds_phys->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    odd->dd_phys->dd_clones, hds->ds_object, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    hds->ds_object, tx));

		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    origin_head->ds_object, tx));
		if (dd->dd_phys->dd_clones == 0) {
			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));
	}

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}
		/* move snap name entry */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (ds->ds_phys->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;

				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
				    odd->dd_phys->dd_clones, o, tx), ==, 0);
				VERIFY3U(zap_add_int(dp->dp_meta_objset,
				    dd->dd_phys->dd_clones, o, tx), ==, 0);
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */

	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
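/*
 * Example of the snap-space deltas above (invented numbers): if the
 * promoted clone will own 8G total (pa->used) of which 3G is snapshot
 * space (pa->cloneusedsnap), and its dir currently carries
 * dd_used_breakdown[DD_USED_SNAP] = 1G, then delta = 2G is charged
 * to the clone dir's SNAP bucket and the remaining 6G to HEAD, while
 * the origin dir is debited symmetrically, so pool-wide totals are
 * unchanged by the promotion.
 */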
static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			return (err);
		}

		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
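/*
 * Illustration of the ordering contract (hypothetical snapshots):
 * given a chain A -> B -> C with C the most recent, calling
 * snaplist_make(dp, B_FALSE, obj(A), obj(C), &l) produces the list
 * [C, B]: C at list_head(), B at list_tail(), and A excluded since
 * first_obj is exclusive.
 */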
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}

static void
snaplist_destroy(list_t *l, boolean_t own)
{
	struct promotenode *snap;

	if (!l || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		if (own)
			dsl_dataset_disown(snap->ds, snaplist_tag);
		else
			dsl_dataset_rele(snap->ds, snaplist_tag);
		kmem_free(snap, sizeof (struct promotenode));
	}
	list_destroy(l);
}
/*
 * Promote a clone.  Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which is originally clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 * NULL, indicating that the clone is not a clone of a clone).
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa;
	struct promotenode *snap;
	int err;

	bzero(&pa, sizeof (struct promotearg));
	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (EINVAL);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);

	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
	    &pa.shared_snaps);
	if (err != 0)
		goto out;

	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
	if (err != 0)
		goto out;

	snap = list_head(&pa.shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
	if (err != 0)
		goto out;

	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
		    FTAG, &pa.origin_origin);
		if (err != 0)
			goto out;
	}

out:
	rw_exit(&dp->dp_config_rwlock);

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 */
	if (err == 0) {
		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
		    dsl_dataset_promote_sync, ds, &pa,
		    2 + 2 * doi.doi_physical_blocks_512);
		if (err && pa.err_ds && conflsnap)
			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
	}

	snaplist_destroy(&pa.shared_snaps, B_TRUE);
	snaplist_destroy(&pa.clone_snaps, B_FALSE);
	snaplist_destroy(&pa.origin_snaps, B_FALSE);
	if (pa.origin_origin)
		dsl_dataset_rele(pa.origin_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
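/*
 * Usage sketch (hypothetical caller): "zfs promote tank/clone" ends
 * up in dsl_dataset_promote("tank/clone", buf).  If a snapshot name
 * in the origin conflicts with one already present on the clone, the
 * call fails with EEXIST and buf holds the conflicting snapshot name
 * so the error message can identify it.
 */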
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force;
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};

/* ARGSUSED */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone (unless they are unrelated) */
	if (csa->cds->ds_prev != NULL &&
	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
	    csa->ohds->ds_object !=
	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/* adjust amount of any unconsumed refreservation */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	if (csa->ohds->ds_quota != 0 &&
	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
		return (EDQUOT);

	return (0);
}
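/*
 * Example of unused_refres_delta (illustrative figures): with
 * refreservation=10G on the head, head unique=6G and clone unique=2G,
 * the unconsumed portion of the reservation grows from 10G-6G=4G to
 * 10G-2G=8G after the swap, so delta = MIN(10G,6G) - MIN(10G,2G) = 4G
 * must be available in the parent dir or the swap fails with ENOSPC.
 */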
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->ohds->ds_quota == 0 ||
	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

	if (csa->cds->ds_objset != NULL) {
		dmu_objset_evict(csa->cds->ds_objset);
		csa->cds->ds_objset = NULL;
	}

	if (csa->ohds->ds_objset != NULL) {
		dmu_objset_evict(csa->ohds->ds_objset);
		csa->ohds->ds_objset = NULL;
	}

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (csa->cds->ds_prev) {
		dsl_dataset_t *origin = csa->cds->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&csa->cds->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&csa->ohds->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
	    csa->cds->ds_phys->ds_used_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/* swap deadlists */
	dsl_deadlist_close(&csa->cds->ds_deadlist);
	dsl_deadlist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj);

	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}

/*
 * Swap 'clone' with its origin head datasets.  Used at the end of "zfs
 * recv" into an existing fs to swizzle the file system to the new
 * version, and by "zfs rollback".  Can also be used to swap two
 * independent head datasets if neither has any snapshots.
 */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/*
	 * Need exclusive access for the swap.  If we're swapping these
	 * datasets back after an error, we already hold the locks.
	 */
	if (!RW_WRITE_HELD(&clone->ds_rwlock))
		rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	spa_t *spa;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	if ((error = spa_open(pname, &spa, FTAG)) != 0)
		return (error);
	dp = spa_get_dsl(spa);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	spa_close(spa, FTAG);

	return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;

		DMU_TX_STAT_BUMP(dmu_tx_quota);
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
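/*
 * Worked quota-check example (numbers invented): with refquota=10G,
 * on-disk used=9G and inflight=2G, used + inflight = 11G >= 10G and
 * inflight > 0, so the write gets ERESTART and may retry after the
 * pending changes settle; only when the on-disk figure alone is at
 * or over quota with nothing in flight does it harden into EDQUOT.
 */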
static int
dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
		return (ENOTSUP);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
	    psa->psa_effective_value < ds->ds_reserved)
		return (ENOSPC);

	return (0);
}

extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);

void
dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	if (ds->ds_quota != effective_value) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = effective_value;

		spa_history_log_internal(LOG_DS_REFQUOTA,
		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
		    (longlong_t)ds->ds_quota, ds->ds_object);
	}
}

int
dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	/*
	 * If someone removes a file, then tries to set the quota, we
	 * want to make sure the file freeing takes effect.
	 */
	txg_wait_open(ds->ds_dir->dd_pool, 0);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
	    ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t unique;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = ds->ds_phys->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, effective_value) -
		    MAX(unique, ds->ds_reserved);

		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
			return (ENOSPC);
		if (ds->ds_quota > 0 &&
		    effective_value > ds->ds_quota)
			return (ENOSPC);
	}

	return (0);
}

static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = ds->ds_phys->ds_unique_bytes;
	delta = MAX(0, (int64_t)(effective_value - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = effective_value;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);

	spa_history_log_internal(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, ds->ds_object);
}
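/*
 * Worked example of the delta formula above (made-up sizes): raising
 * refreservation from 5G to 8G on a dataset with unique=3G charges
 * delta = MAX(0, 8G-3G) - MAX(0, 5G-3G) = 5G - 2G = 3G more to
 * DD_USED_REFRSRV; lowering it back yields the symmetric -3G credit.
 */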
int
dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
	    &reservation);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_reservation_check,
	    dsl_dataset_set_reservation_sync, ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}

typedef struct zfs_hold_cleanup_arg {
	dsl_pool_t *dp;
	uint64_t dsobj;
	char htag[MAXNAMELEN];
} zfs_hold_cleanup_arg_t;

static void
dsl_dataset_user_release_onexit(void *arg)
{
	zfs_hold_cleanup_arg_t *ca = arg;

	(void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
	    B_TRUE);
	kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
}

void
dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
    minor_t minor)
{
	zfs_hold_cleanup_arg_t *ca;

	ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
	ca->dp = ds->ds_dir->dd_pool;
	ca->dsobj = ds->ds_object;
	(void) strlcpy(ca->htag, htag, sizeof (ca->htag));
	VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
	    dsl_dataset_user_release_onexit, ca, NULL));
}
/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
static int
dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int error = 0;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	if (!dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	/* tags must be unique */
	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj) {
		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
		    8, 1, tx);
		if (error == 0)
			error = EEXIST;
		else if (error == ENOENT)
			error = 0;
	}
	mutex_exit(&ds->ds_lock);

	if (error == 0 && ha->temphold &&
	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
		error = E2BIG;

	return (error);
}

void
dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t now = gethrestime_sec();
	uint64_t zapobj;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/*
		 * This is the first user hold for this dataset.  Create
		 * the userrefs zap object.
		 */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;
	mutex_exit(&ds->ds_lock);

	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));

	if (ha->temphold) {
		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
		    htag, now, tx));
	}

	spa_history_log_internal(LOG_DS_USER_HOLD,
	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
	    (int)ha->temphold, ds->ds_object);
}

static int
dsl_dataset_user_hold_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	dsl_dataset_t *ds;
	int error;
	char *name;

	/* alloc a buffer to hold dsname@snapname plus terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, ha->dstg, &ds);
	strfree(name);
	if (error == 0) {
		ha->gotone = B_TRUE;
		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
		    dsl_dataset_user_hold_sync, ds, ha, 0);
	} else if (error == ENOENT && ha->recursive) {
		error = 0;
	} else {
		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	}
	return (error);
}

int
dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
    boolean_t temphold)
{
	struct dsl_ds_holdarg *ha;
	int error;

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
	ha->htag = htag;
	ha->temphold = temphold;
	error = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
	    ds, ha, 0);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));

	return (error);
}
int
dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
    boolean_t recursive, boolean_t temphold, int cleanup_fd)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;
	minor_t minor = 0;

	if (cleanup_fd != -1) {
		/* Currently we only support cleanup-on-exit of tempholds. */
		if (!temphold)
			return (EINVAL);
		error = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (error)
			return (error);
	}

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		if (cleanup_fd != -1)
			zfs_onexit_fd_rele(cleanup_fd);
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	ha->temphold = temphold;

	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_hold_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;

		if (dst->dst_err) {
			dsl_dataset_name(ds, ha->failed);
			*strchr(ha->failed, '@') = '\0';
		} else if (error == 0 && minor != 0 && temphold) {
			/*
			 * If this hold is to be released upon process exit,
			 * register that action now.
			 */
			dsl_register_onexit_hold_cleanup(ds, htag, minor);
		}
		dsl_dataset_rele(ds, ha->dstg);
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);

	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);
	if (cleanup_fd != -1)
		zfs_onexit_fd_rele(cleanup_fd);
	return (error);
}
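/*
 * Usage sketch (hypothetical caller, names invented): a recursive
 * temporary hold whose release is tied to a cleanup file descriptor
 * could be taken as
 *
 *	error = dsl_dataset_user_hold(dsname, "snap", ".send-1234",
 *	    B_TRUE, B_TRUE, cleanup_fd);
 *
 * When the process holding cleanup_fd exits, the onexit callback
 * registered above releases the ".send-1234" tag on every snapshot
 * it was placed on.
 */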
struct dsl_ds_releasearg {
	dsl_dataset_t *ds;
	const char *htag;
	boolean_t own;		/* do we own or just hold ds? */
};

static int
dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
    boolean_t *might_destroy)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t zapobj;
	uint64_t tmp;
	int error;

	*might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	if (zapobj == 0) {
		/* The tag can't possibly exist */
		mutex_exit(&ds->ds_lock);
		return (ESRCH);
	}

	/* Make sure the tag exists */
	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
	if (error) {
		mutex_exit(&ds->ds_lock);
		if (error == ENOENT)
			error = ESRCH;
		return (error);
	}

	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds))
		*might_destroy = B_TRUE;

	mutex_exit(&ds->ds_lock);
	return (0);
}

static int
dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	boolean_t might_destroy;
	int error;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	error = dsl_dataset_release_might_destroy(ds, ra->htag,
	    &might_destroy);
	if (error)
		return (error);

	if (might_destroy) {
		struct dsl_ds_destroyarg dsda = {0};

		if (dmu_tx_is_syncing(tx)) {
			/*
			 * If we're not prepared to remove the snapshot,
			 * we can't allow the release to happen right now.
			 */
			if (!ra->own)
				return (EBUSY);
		}
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		return (dsl_dataset_destroy_check(&dsda, tag, tx));
	}

	return (0);
}
static void
dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	uint64_t dsobj = ds->ds_object;
	uint64_t refs;
	int error;

	mutex_enter(&ds->ds_lock);
	ds->ds_userrefs--;
	refs = ds->ds_userrefs;
	mutex_exit(&ds->ds_lock);
	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
	VERIFY(error == 0 || error == ENOENT);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds)) {
		struct dsl_ds_destroyarg dsda = {0};

		ASSERT(ra->own);
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		/* We already did the destroy_check */
		dsl_dataset_destroy_sync(&dsda, tag, tx);
	}

	spa_history_log_internal(LOG_DS_USER_RELEASE,
	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
	    ra->htag, (longlong_t)refs, dsobj);
}

static int
dsl_dataset_user_release_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	struct dsl_ds_releasearg *ra;
	dsl_dataset_t *ds;
	int error;
	void *dtag = ha->dstg;
	char *name;
	boolean_t own = B_FALSE;
	boolean_t might_destroy;

	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, dtag, &ds);
	strfree(name);
	if (error == ENOENT && ha->recursive)
		return (0);
	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	if (error)
		return (error);

	ha->gotone = B_TRUE;

	ASSERT(dsl_dataset_is_snapshot(ds));

	error = dsl_dataset_release_might_destroy(ds, ha->htag,
	    &might_destroy);
	if (error) {
		dsl_dataset_rele(ds, dtag);
		return (error);
	}

	if (might_destroy) {
#ifdef _KERNEL
		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
		error = zfs_unmount_snap(name, NULL);
		strfree(name);
		if (error) {
			dsl_dataset_rele(ds, dtag);
			return (error);
		}
#endif
		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
			dsl_dataset_rele(ds, dtag);
			return (EBUSY);
		} else {
			own = B_TRUE;
			dsl_dataset_make_exclusive(ds, dtag);
		}
	}

	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
	ra->ds = ds;
	ra->htag = ha->htag;
	ra->own = own;
	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, ra, dtag, 0);

	return (0);
}
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup.  We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner.  Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY)
		goto top;

	return (error);
}

/*
 * Called at spa_load time (with retry == B_FALSE) to release a stale
 * temporary user hold.  Also called by the onexit code (with retry == B_TRUE).
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	do {
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
		rw_exit(&dp->dp_config_rwlock);
		if (error)
			return (error);
		namelen = dsl_dataset_namelen(ds)+1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(ds, name);
		dsl_dataset_rele(ds, FTAG);

		snap = strchr(name, '@');
		*snap = '\0';
		++snap;
		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
		kmem_free(name, namelen);

		/*
		 * The object can't have been destroyed because we have a hold,
		 * but it might have been renamed, resulting in ENOENT.  Retry
		 * if we've been requested to do so.
		 *
		 * It would be nice if we could use the dsobj all the way
		 * through and avoid ENOENT entirely.  But we might need to
		 * unmount the snapshot, and there's currently no way to lookup
		 * a vfsp using a ZFS object id.
		 */
	} while ((error == ENOENT) && retry);

	return (error);
}
int
dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
	if (ds->ds_phys->ds_userrefs_obj != 0) {
		zap_attribute_t *za;
		zap_cursor_t zc;

		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
		    ds->ds_phys->ds_userrefs_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
			    za->za_first_integer));
		}
		zap_cursor_fini(&zc);
		kmem_free(za, sizeof (zap_attribute_t));
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
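/*
 * Shape of the returned nvlist (example values): one uint64 entry per
 * hold tag, keyed by tag name and valued with the hold's creation
 * time in seconds since the epoch, e.g.
 *
 *	{ "backup-job" = 1311102000, ".send-1234" = 1311105600 }
 *
 * which is what "zfs holds" formats for display.
 */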
/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
		if (DS_IS_INCONSISTENT(ds))
			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
		else
			dsl_dataset_disown(ds, FTAG);
	}

	return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_drop_ref);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_make_exclusive);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_destroy);
EXPORT_SYMBOL(dsl_snapshots_destroy);
EXPORT_SYMBOL(dsl_dataset_destroy_check);
EXPORT_SYMBOL(dsl_dataset_destroy_sync);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_rename);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_clone_swap);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_set_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_lastsnap);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_block_freeable);
EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_set_quota);
EXPORT_SYMBOL(dsl_dataset_set_quota_sync);
EXPORT_SYMBOL(dsl_dataset_set_reservation);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
#endif