/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>

static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)

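/*
 * Note (illustrative, not from the original source): dsl_reaper is a
 * sentinel owner value, not a real tag.  dsl_dataset_destroy_sync()
 * stores it in ds_owner once destruction is assured, so
 * DSL_DATASET_IS_DESTROYED() is simply a pointer comparison against this
 * address; the string contents are never examined.
 */
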
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}

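/*
 * Worked example (illustrative, not from the original source): with
 * ds_reserved = 100 and ds_unique_bytes = 60, a delta of +30 keeps unique
 * (90) under the reservation, so old_bytes = new_bytes = 100 and the
 * dsl_dir sees a delta of 0 -- the refreservation already charged that
 * space to our ancestors.  A further +30 would push unique to 120, so the
 * parent would be charged only the 20 bytes above the reservation.
 */
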
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used, compressed, uncompressed;
	int64_t delta;

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}

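/*
 * Illustrative note: the (used - delta) transferred from DD_USED_REFRSRV
 * to DD_USED_HEAD above is the portion of the newly born block that was
 * already pre-charged by the refreservation; only delta (computed by
 * parent_delta()) is new space from the dsl_dir's point of view.
 */
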
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	int used, compressed, uncompressed;

	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}

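/*
 * Illustrative summary of the free path above: blocks born after the
 * previous snapshot are freed immediately; older blocks still referenced
 * by a snapshot go on the deadlist.  From interrupt context (async) they
 * are first staged on ds_pending_deadlist, which dsl_pool_sync() later
 * drains into the real deadlist via dsl_deadlist_insert().
 */
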
uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}

boolean_t
dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
    uint64_t blk_birth)
{
	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
		return (B_FALSE);

	ddt_prefetch(dsl_dataset_get_spa(ds), bp);

	return (B_TRUE);
}

/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}

	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}

static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	dmu_buf_t *headdbuf;
	int err;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}

int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}

static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);
	return (err);
}

static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_type != DMU_OT_DSL_DATASET)
		return (EINVAL);

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner = NULL;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;
		list_link_init(&ds->ds_synced_link);

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		bplist_create(&ds->ds_pending_deadlist);
		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);

		err = dsl_dir_open_obj(dp,
		    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		if (err) {
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    ds->ds_phys->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0) {
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);
		}
		if (err || winner) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}

int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock.  And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the ds_lock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}

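/*
 * Typical usage (a minimal illustrative sketch; error handling elided):
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_hold("pool/fs@snap", FTAG, &ds) == 0) {
 *		... use ds ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 * Long-term consumers use dsl_dataset_own()/dsl_dataset_disown() instead,
 * which set ds_owner and exclude other owners.
 */
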
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}

int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}

int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}

int
dsl_dataset_own(const char *name, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}

void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}

int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}

void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}

void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}

boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = tag;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}

void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}

uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}

uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}

914 dmu_snapshots_destroy_nvl(nvlist_t
*snaps
, boolean_t defer
, char *failed
)
917 dsl_sync_task_t
*dst
;
920 dsl_sync_task_group_t
*dstg
;
922 pair
= nvlist_next_nvpair(snaps
, NULL
);
926 err
= spa_open(nvpair_name(pair
), &spa
, FTAG
);
929 dstg
= dsl_sync_task_group_create(spa_get_dsl(spa
));
931 for (pair
= nvlist_next_nvpair(snaps
, NULL
); pair
!= NULL
;
932 pair
= nvlist_next_nvpair(snaps
, pair
)) {
936 err
= dsl_dataset_own(nvpair_name(pair
), B_TRUE
, dstg
, &ds
);
938 struct dsl_ds_destroyarg
*dsda
;
940 dsl_dataset_make_exclusive(ds
, dstg
);
941 dsda
= kmem_zalloc(sizeof (struct dsl_ds_destroyarg
),
945 dsl_sync_task_create(dstg
, dsl_dataset_destroy_check
,
946 dsl_dataset_destroy_sync
, dsda
, dstg
, 0);
947 } else if (err
== ENOENT
) {
950 (void) strcpy(failed
, nvpair_name(pair
));
956 err
= dsl_sync_task_group_wait(dstg
);
958 for (dst
= list_head(&dstg
->dstg_tasks
); dst
;
959 dst
= list_next(&dstg
->dstg_tasks
, dst
)) {
960 struct dsl_ds_destroyarg
*dsda
= dst
->dst_arg1
;
961 dsl_dataset_t
*ds
= dsda
->ds
;
964 * Return the file system name that triggered the error
967 dsl_dataset_name(ds
, failed
);
969 ASSERT3P(dsda
->rm_origin
, ==, NULL
);
970 dsl_dataset_disown(ds
, dstg
);
971 kmem_free(dsda
, sizeof (struct dsl_ds_destroyarg
));
974 dsl_sync_task_group_destroy(dstg
);
975 spa_close(spa
, FTAG
);
static boolean_t
dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
{
	boolean_t might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(ds))
		might_destroy = B_TRUE;
	mutex_exit(&ds->ds_lock);

	return (might_destroy);
}

/*
 * If we're removing a clone, and these three conditions are true:
 *	1) the clone's origin has no other children
 *	2) the clone's origin has no user references
 *	3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);
#ifdef _KERNEL
		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
#endif
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}
	return (0);
}

/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t *dummy_ds;

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
	dummy_ds->ds_dir = dd;
	dummy_ds->ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out_free;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out_free;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}
	if (err != ESRCH)
		goto out_free;

	/*
	 * Only the ZIL knows how to free log blocks.
	 */
	zil_destroy(dmu_objset_zil(os), B_FALSE);

	/*
	 * Sync out all in-flight IO.
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		ASSERTV(uint64_t count);

		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out_free;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out_free;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);

out_free:
	kmem_free(dummy_ds, sizeof (dsl_dataset_t));
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}

blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}

void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}

void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}

/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!dsl_dataset_is_snapshot(ds));

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}

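/*
 * Worked example (illustrative, not from the original source): if the
 * head uses 10G, the most recent snapshot used 6G when taken (mrs_used),
 * and 2G of that has since been freed from the head (dlused, i.e. it sits
 * on the head's deadlist), then 4G of the snapshot is still shared with
 * the head, so unique = 10G - (6G - 2G) = 6G.
 */
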
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}

static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}

/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point. However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}

struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}

static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}

static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int err;
	ASSERTV(uint64_t count);

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}

void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	boolean_t wont_destroy;
	uint64_t obj;

	wont_destroy = (dsda->defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));

	ASSERT(ds->ds_owner || wont_destroy);
	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	if (wont_destroy) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		return;
	}

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_prop_setarg_t psa;
		uint64_t value = 0;

		dsl_prop_setarg_init_uint64(&psa, "refreservation",
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    &value);
		psa.psa_effective_value = 0;	/* predict default value */

		dsl_dataset_set_reservation_sync(ds, &psa, tx);
		ASSERT3U(ds->ds_reserved, ==, 0);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;

			/*
			 * If the clone's origin has no other clones, no
			 * user holds, and has been marked for deferred
			 * deletion, then we should have done the necessary
			 * destroy setup for it.
			 */
			if (ds_prev->ds_phys->ds_num_children == 1 &&
			    ds_prev->ds_userrefs == 0 &&
			    DS_IS_DEFER_DESTROY(ds_prev)) {
				ASSERT3P(dsda->rm_origin, !=, NULL);
			} else {
				ASSERT3P(dsda->rm_origin, ==, NULL);
			}
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	if (dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_t *ds_next;
		uint64_t old_unique;
		uint64_t used = 0, comp = 0, uncomp = 0;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = ds_next->ds_phys->ds_unique_bytes;

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

		if (ds_next->ds_deadlist.dl_oldfmt) {
			process_old_deadlist(ds, ds_prev, ds_next,
			    after_branch_point, tx);
		} else {
			/* Adjust prev's unique space. */
			if (ds_prev && !after_branch_point) {
				dsl_deadlist_space_range(&ds_next->ds_deadlist,
				    ds_prev->ds_phys->ds_prev_snap_txg,
				    ds->ds_phys->ds_prev_snap_txg,
				    &used, &comp, &uncomp);
				ds_prev->ds_phys->ds_unique_bytes += used;
			}

			/* Adjust snapused. */
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
			    &used, &comp, &uncomp);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
			    -used, -comp, -uncomp, tx);

			/* Move blocks to be freed to pool's free list. */
			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
			    tx);
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, comp, uncomp, tx);
			dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);

			/* Merge our deadlist into next's and free it. */
			dsl_deadlist_merge(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_deadlist_obj, tx);
		}
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

		/* Collapse range in clone heads */
		dsl_dataset_remove_clones_key(ds,
		    ds->ds_phys->ds_creation_txg, tx);

		if (dsl_dataset_is_snapshot(ds_next)) {
			dsl_dataset_t *ds_nextnext;
			dsl_dataset_t *hds;

			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (ie. be on the snap after next's
			 * deadlist).
			 */
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_nextnext));
			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
			ds_next->ds_phys->ds_unique_bytes += used;
			dsl_dataset_rele(ds_nextnext, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);

			/* Collapse range in this head. */
			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
			    FTAG, &hds));
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    ds->ds_phys->ds_creation_txg, tx);
			dsl_dataset_rele(hds, FTAG);
		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir,
				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * NB: this should be very quick, because we already
		 * freed all the objects in open context.
		 */
		ka.ds = ds;
		ka.tx = tx;
		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == 0);

		if (ds->ds_prev != NULL) {
			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
				VERIFY3U(0, ==, zap_remove_int(mos,
				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
				    ds->ds_object, tx));
			}
			dsl_dataset_rele(ds->ds_prev, ds);
			ds->ds_prev = ds_prev = NULL;
		}
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}

static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}

int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the dataset's name is not too long.  Name consists
	 * of the dataset's length + 1 for the @-sign + snapshot name's length
	 */
	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
		return (ENAMETOOLONG);

	err = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (err)
		return (err);

	ds->ds_trysnap_txg = tx->tx_txg;
	return (0);
}

void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}

void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_objset, zio, tx);
}

static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t count = 0;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	nvlist_t *propval;
	nvlist_t *val;

	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * There may be missing entries in ds_next_clones_obj
	 * due to a bug in a previous version of the code.
	 * Only trust it if it has the right number of entries.
	 */
	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
		    &count));
	}
	if (count != ds->ds_phys->ds_num_children - 1) {
		goto fail;
	}
	for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;
		char buf[ZFS_MAXNAMELEN];
		if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone) != 0) {
			goto fail;
		}
		dsl_dir_name(clone->ds_dir, buf);
		VERIFY(nvlist_add_boolean(val, buf) == 0);
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
	VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
	VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
	    propval) == 0);
fail:
	nvlist_free(val);
	nvlist_free(propval);
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}

void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs, ratio;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    ds->ds_phys->ds_unique_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    ds->ds_object);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    ds->ds_userrefs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		uint64_t written, comp, uncomp;
		dsl_pool_t *dp = ds->ds_dir->dd_pool;
		dsl_dataset_t *prev;
		int err;

		rw_enter(&dp->dp_config_rwlock, RW_READER);
		err = dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
		rw_exit(&dp->dp_config_rwlock);
		if (err == 0) {
			err = dsl_dataset_space_written(prev, ds, &written,
			    &comp, &uncomp);
			dsl_dataset_rele(prev, FTAG);
			if (err == 0) {
				dsl_prop_nvlist_add_uint64(nv,
				    ZFS_PROP_WRITTEN, written);
			}
		}
	}

	ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
	    (ds->ds_phys->ds_uncompressed_bytes * 100 /
	    ds->ds_phys->ds_compressed_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);

	if (ds->ds_phys->ds_next_snap_obj) {
		/*
		 * This is a snapshot; override the dd's space used with
		 * our unique space and compression ratio.
		 */
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
		    ds->ds_phys->ds_unique_bytes);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);

		get_clones_stat(ds, nv);
	}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	} else {
		stat->dds_origin[0] = '\0';
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = ds->ds_phys->ds_used_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
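/*
 * Worked example for the refquota adjustment above (hypothetical
 * numbers): with refquota = 10G, referenced = 8G, and 100G free in
 * the pool, the space reported as available is MIN(100G, 10G - 8G)
 * = 2G. Once the referenced space reaches the quota, available drops
 * to zero even though the pool itself still has free space.
 */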
boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
	ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));
	if (ds->ds_prev == NULL)
		return (B_FALSE);
	if (ds->ds_phys->ds_bp.blk_birth >
	    ds->ds_prev->ds_phys->ds_creation_txg) {
		objset_t *os, *os_prev;
		/*
		 * It may be that only the ZIL differs, because it was
		 * reset in the head. Don't count that as being
		 * modified.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0)
			return (B_TRUE);
		if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
			return (B_TRUE);
		return (bcmp(&os->os_phys->os_meta_dnode,
		    &os_prev->os_phys->os_meta_dnode,
		    sizeof (os->os_phys->os_meta_dnode)) != 0);
	}
	return (B_FALSE);
}
static int
dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_dataset_t *hds;
	uint64_t val;
	int err;

	err = dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
	if (err)
		return (err);

	/* new name better not be in use */
	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
	dsl_dataset_rele(hds, FTAG);

	if (err == 0)
		err = EEXIST;
	else if (err == ENOENT)
		err = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
		err = ENAMETOOLONG;

	return (err);
}
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT3U(err, ==, 0);
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}
struct renamesnaparg {
	dsl_sync_task_group_t *dstg;
	char failed[MAXPATHLEN];
	char *oldsnap;
	char *newsnap;
};
static int
dsl_snapshot_rename_one(const char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *snapname;
	int err;

	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
	if (err != 0) {
		strfree(snapname);
		return (err == ENOENT ? 0 : err);
	}

	/*
	 * For all filesystems undergoing rename, we'll need to unmount it.
	 */
	(void) zfs_unmount_snap(snapname, NULL);

	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
	strfree(snapname);
	if (err != 0)
		return (err == ENOENT ? 0 : err);

	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	return (0);
}
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname) + 1;

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';

	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len);

	if (err == 0)
		err = dsl_sync_task_group_wait(ra->dstg);

	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
			(void) strlcat(ra->failed, ra->newsnap,
			    sizeof (ra->failed));
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	if (err)
		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}
static int
dsl_valid_rename(const char *oldname, void *arg)
{
	int delta = *(int *)arg;

	if (strlen(oldname) + delta >= MAXNAMELEN)
		return (ENAMETOOLONG);

	return (0);
}
#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);

	if (tail == NULL) {
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (err == 0)
			err = dsl_dir_rename(dd, newname);
		dsl_dir_close(dd, FTAG);
		return (err);
	}

	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* new name must be snapshot in same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (recursive) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

struct promotearg {
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin;
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;
	uint64_t unused;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &pa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer. Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	pa->used = origin_ds->ds_phys->ds_used_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			err = EEXIST;
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -=
		    pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone). For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
out:
	pa->err_ds = snap->ds->ds_snapname;
	return (err);
}
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    origin_ds->ds_phys->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    odd->dd_phys->dd_clones, hds->ds_object, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    hds->ds_object, tx));

		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    origin_head->ds_object, tx));
		if (dd->dd_phys->dd_clones == 0) {
			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));
	}

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}
		/* move snap name entry */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (ds->ds_phys->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;

				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
				    odd->dd_phys->dd_clones, o, tx), ==, 0);
				VERIFY3U(zap_add_int(dp->dp_meta_objset,
				    dd->dd_phys->dd_clones, o, tx), ==, 0);
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0). This
	 * is true for each of {clone,origin} independently.
	 */

	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive). The list will be in reverse
 * order (last_obj will be the list_head()). If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			return (err);
		}

		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}
static void
snaplist_destroy(list_t *l, boolean_t own)
{
	struct promotenode *snap;

	if (!l || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		if (own)
			dsl_dataset_disown(snap->ds, snaplist_tag);
		else
			dsl_dataset_rele(snap->ds, snaplist_tag);
		kmem_free(snap, sizeof (struct promotenode));
	}
	list_destroy(l);
}
/*
 * Promote a clone. Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which is originally clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 * NULL, indicating that the clone is not a clone of a clone).
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa;
	struct promotenode *snap;
	int err;

	bzero(&pa, sizeof (struct promotearg));
	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (EINVAL);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);

	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
	    &pa.shared_snaps);
	if (err != 0)
		goto out;

	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
	if (err != 0)
		goto out;

	snap = list_head(&pa.shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
	if (err != 0)
		goto out;

	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
		    FTAG, &pa.origin_origin);
		if (err != 0)
			goto out;
	}

out:
	rw_exit(&dp->dp_config_rwlock);

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 */
	if (err == 0) {
		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
		    dsl_dataset_promote_sync, ds, &pa,
		    2 + 2 * doi.doi_physical_blocks_512);
		if (err && pa.err_ds && conflsnap)
			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
	}

	snaplist_destroy(&pa.shared_snaps, B_TRUE);
	snaplist_destroy(&pa.clone_snaps, B_FALSE);
	snaplist_destroy(&pa.origin_snaps, B_FALSE);
	if (pa.origin_origin)
		dsl_dataset_rele(pa.origin_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force;
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone (unless they are unrelated) */
	if (csa->cds->ds_prev != NULL &&
	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
	    csa->ohds->ds_object !=
	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/* adjust amount of any unconsumed refreservation */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	if (csa->ohds->ds_quota != 0 &&
	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
		return (EDQUOT);

	return (0);
}
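/*
 * Worked example for unused_refres_delta above (hypothetical numbers):
 * with ds_reserved = 10G on the origin head, 2G unique on the head
 * and 7G unique on the clone, the unconsumed reservation changes from
 * 10G - 2G = 8G to 10G - 7G = 3G after the swap, and the formula gives
 * MIN(10G, 2G) - MIN(10G, 7G) = -5G. A negative delta never needs new
 * space; a positive one is checked against the space available in the
 * dir before the swap is allowed.
 */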
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->ohds->ds_quota == 0 ||
	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

	if (csa->cds->ds_objset != NULL) {
		dmu_objset_evict(csa->cds->ds_objset);
		csa->cds->ds_objset = NULL;
	}

	if (csa->ohds->ds_objset != NULL) {
		dmu_objset_evict(csa->ohds->ds_objset);
		csa->ohds->ds_objset = NULL;
	}

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (csa->cds->ds_prev) {
		dsl_dataset_t *origin = csa->cds->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&csa->cds->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&csa->ohds->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
	    csa->cds->ds_phys->ds_used_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&csa->cds->ds_deadlist);
	dsl_deadlist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj);

	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
/*
 * Swap 'clone' with its origin head datasets. Used at the end of "zfs
 * recv" into an existing fs to swizzle the file system to the new
 * version, and by "zfs rollback". Can also be used to swap two
 * independent head datasets if neither has any snapshots.
 */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/*
	 * Need exclusive access for the swap. If we're swapping these
	 * datasets back after an error, we already hold the locks.
	 */
	if (!RW_WRITE_HELD(&clone->ds_rwlock))
		rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	spa_t *spa;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	if ((error = spa_open(pname, &spa, FTAG)) != 0)
		return (error);
	dp = spa_get_dsl(spa);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	spa_close(spa, FTAG);

	return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;

		DMU_TX_STAT_BUMP(dmu_tx_quota);
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
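/*
 * Example of the quota logic above (illustrative): with refquota = 10G
 * and 10.5G referenced on disk, a write attempt while other changes
 * are still in flight (inflight > 0) gets ERESTART, since pending
 * changes may yet free space; with nothing in flight the estimate
 * cannot improve, so the caller gets a final EDQUOT.
 */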
static int
dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
		return (ENOTSUP);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
	    psa->psa_effective_value < ds->ds_reserved)
		return (ENOSPC);

	return (0);
}
extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);

void
dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	if (ds->ds_quota != effective_value) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = effective_value;

		spa_history_log_internal(LOG_DS_REFQUOTA,
		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
		    (longlong_t)ds->ds_quota, ds->ds_object);
	}
}
int
dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	/*
	 * If someone removes a file, then tries to set the quota, we
	 * want to make sure the file freeing takes effect.
	 */
	txg_wait_open(ds->ds_dir->dd_pool, 0);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
	    ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t unique;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = ds->ds_phys->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, effective_value) -
		    MAX(unique, ds->ds_reserved);

		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
			return (ENOSPC);
		if (ds->ds_quota > 0 &&
		    effective_value > ds->ds_quota)
			return (ENOSPC);
	}

	return (0);
}
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = ds->ds_phys->ds_unique_bytes;
	delta = MAX(0, (int64_t)(effective_value - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = effective_value;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);

	spa_history_log_internal(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, ds->ds_object);
}
int
dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
	    &reservation);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_reservation_check,
	    dsl_dataset_set_reservation_sync, ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
typedef struct zfs_hold_cleanup_arg {
	dsl_pool_t *dp;
	uint64_t dsobj;
	char htag[MAXNAMELEN];
} zfs_hold_cleanup_arg_t;
static void
dsl_dataset_user_release_onexit(void *arg)
{
	zfs_hold_cleanup_arg_t *ca = arg;

	(void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
	    B_TRUE);
	kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
}
void
dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
    minor_t minor)
{
	zfs_hold_cleanup_arg_t *ca;

	ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
	ca->dp = ds->ds_dir->dd_pool;
	ca->dsobj = ds->ds_object;
	(void) strlcpy(ca->htag, htag, sizeof (ca->htag));
	VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
	    dsl_dataset_user_release_onexit, ca, NULL));
}
/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
int
dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t tmp;
	int error = 0;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	if (!dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	/* tags must be unique */
	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj) {
		/* the value is unused; we only probe for the tag */
		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
		    8, 1, &tmp);
		if (error == 0)
			error = EEXIST;
		else if (error == ENOENT)
			error = 0;
	}
	mutex_exit(&ds->ds_lock);

	if (error == 0 && ha->temphold &&
	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
		error = E2BIG;

	return (error);
}
void
dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t now = gethrestime_sec();
	uint64_t zapobj;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/*
		 * This is the first user hold for this dataset. Create
		 * the userrefs zap object.
		 */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;
	mutex_exit(&ds->ds_lock);

	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));

	if (ha->temphold) {
		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
		    htag, now, tx));
	}

	spa_history_log_internal(LOG_DS_USER_HOLD,
	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
	    (int)ha->temphold, ds->ds_object);
}
static int
dsl_dataset_user_hold_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	dsl_dataset_t *ds;
	int error;
	char *name;

	/* alloc a buffer to hold dsname@snapname plus terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, ha->dstg, &ds);
	strfree(name);
	if (error == 0) {
		ha->gotone = B_TRUE;
		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
		    dsl_dataset_user_hold_sync, ds, ha, 0);
	} else if (error == ENOENT && ha->recursive) {
		error = 0;
	} else {
		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	}
	return (error);
}
int
dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
    boolean_t temphold)
{
	struct dsl_ds_holdarg *ha;
	int error;

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
	ha->htag = htag;
	ha->temphold = temphold;
	error = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
	    ds, ha, 0);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));

	return (error);
}
int
dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
    boolean_t recursive, boolean_t temphold, int cleanup_fd)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;
	minor_t minor = 0;

	if (cleanup_fd != -1) {
		/* Currently we only support cleanup-on-exit of tempholds. */
		if (!temphold)
			return (EINVAL);
		error = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (error)
			return (error);
	}

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		if (cleanup_fd != -1)
			zfs_onexit_fd_rele(cleanup_fd);
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	ha->temphold = temphold;

	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_hold_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;

		if (dst->dst_err) {
			dsl_dataset_name(ds, ha->failed);
			*strchr(ha->failed, '@') = '\0';
		} else if (error == 0 && minor != 0 && temphold) {
			/*
			 * If this hold is to be released upon process exit,
			 * register that action now.
			 */
			dsl_register_onexit_hold_cleanup(ds, htag, minor);
		}
		dsl_dataset_rele(ds, ha->dstg);
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);

	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);
	if (cleanup_fd != -1)
		zfs_onexit_fd_rele(cleanup_fd);
	return (error);
}
struct dsl_ds_releasearg {
	dsl_dataset_t *ds;
	const char *htag;
	boolean_t own;		/* do we own or just hold ds? */
};
static int
dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
    boolean_t *might_destroy)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t zapobj;
	uint64_t tmp;
	int error;

	*might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	if (zapobj == 0) {
		/* The tag can't possibly exist */
		mutex_exit(&ds->ds_lock);
		return (ESRCH);
	}

	/* Make sure the tag exists */
	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
	if (error) {
		mutex_exit(&ds->ds_lock);
		if (error == ENOENT)
			error = ESRCH;
		return (error);
	}

	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds))
		*might_destroy = B_TRUE;

	mutex_exit(&ds->ds_lock);
	return (0);
}
static int
dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	boolean_t might_destroy;
	int error;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	error = dsl_dataset_release_might_destroy(ds, ra->htag,
	    &might_destroy);
	if (error)
		return (error);

	if (might_destroy) {
		struct dsl_ds_destroyarg dsda = {0};

		if (dmu_tx_is_syncing(tx)) {
			/*
			 * If we're not prepared to remove the snapshot,
			 * we can't allow the release to happen right now.
			 */
			if (!ra->own)
				return (EBUSY);
		}
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		return (dsl_dataset_destroy_check(&dsda, tag, tx));
	}

	return (0);
}
static void
dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	uint64_t dsobj = ds->ds_object;
	uint64_t refs;
	int error;

	mutex_enter(&ds->ds_lock);
	ds->ds_userrefs--;
	refs = ds->ds_userrefs;
	mutex_exit(&ds->ds_lock);
	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
	VERIFY(error == 0 || error == ENOENT);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds)) {
		struct dsl_ds_destroyarg dsda = {0};

		ASSERT(ra->own);
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		/* We already did the destroy_check */
		dsl_dataset_destroy_sync(&dsda, tag, tx);
	}

	spa_history_log_internal(LOG_DS_USER_RELEASE,
	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
	    ra->htag, (longlong_t)refs, dsobj);
}
static int
dsl_dataset_user_release_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	struct dsl_ds_releasearg *ra;
	dsl_dataset_t *ds;
	int error;
	void *dtag = ha->dstg;
	char *name;
	boolean_t own = B_FALSE;
	boolean_t might_destroy;

	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, dtag, &ds);
	strfree(name);
	if (error == ENOENT && ha->recursive)
		return (0);
	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	if (error)
		return (error);

	ha->gotone = B_TRUE;

	ASSERT(dsl_dataset_is_snapshot(ds));

	error = dsl_dataset_release_might_destroy(ds, ha->htag,
	    &might_destroy);
	if (error) {
		dsl_dataset_rele(ds, dtag);
		return (error);
	}

	if (might_destroy) {
#ifdef _KERNEL
		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
		error = zfs_unmount_snap(name, NULL);
		strfree(name);
		if (error) {
			dsl_dataset_rele(ds, dtag);
			return (error);
		}
#endif
		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
			dsl_dataset_rele(ds, dtag);
			return (EBUSY);
		}
		own = B_TRUE;
		dsl_dataset_make_exclusive(ds, dtag);
	}

	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
	ra->ds = ds;
	ra->htag = ha->htag;
	ra->own = own;
	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, ra, dtag, 0);

	return (0);
}
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup. We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner. Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY)
		goto top;

	return (error);
}
/*
 * Called at spa_load time (with retry == B_FALSE) to release a stale
 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	do {
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
		rw_exit(&dp->dp_config_rwlock);
		if (error)
			return (error);
		namelen = dsl_dataset_namelen(ds)+1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(ds, name);
		dsl_dataset_rele(ds, FTAG);

		snap = strchr(name, '@');
		*snap = '\0';
		++snap;
		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
		kmem_free(name, namelen);

		/*
		 * The object can't have been destroyed because we have a hold,
		 * but it might have been renamed, resulting in ENOENT. Retry
		 * if we've been requested to do so.
		 *
		 * It would be nice if we could use the dsobj all the way
		 * through and avoid ENOENT entirely. But we might need to
		 * unmount the snapshot, and there's currently no way to lookup
		 * a vfsp using a ZFS object id.
		 */
	} while ((error == ENOENT) && retry);

	return (error);
}
int
dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
	if (ds->ds_phys->ds_userrefs_obj != 0) {
		zap_attribute_t *za;
		zap_cursor_t zc;

		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
		    ds->ds_phys->ds_userrefs_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
			    za->za_first_integer));
		}
		zap_cursor_fini(&zc);
		kmem_free(za, sizeof (zap_attribute_t));
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
		if (DS_IS_INCONSISTENT(ds))
			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
		else
			dsl_dataset_disown(ds, FTAG);
	}
	return (0);
}
/*
 * Return (in *usedp) the amount of space written in new that is not
 * present in oldsnap. New may be a snapshot or the head. Old must be
 * a snapshot before new, in new's filesystem (or its origin). If not then
 * fail and return EINVAL.
 *
 * The written space is calculated by considering two components: First, we
 * ignore any freed space, and calculate the written as new's used space
 * minus old's used space. Next, we add in the amount of space that was freed
 * between the two snapshots, thus reducing new's used space relative to old's.
 * Specifically, this is the space that was born before old->ds_creation_txg,
 * and freed before new (ie. on new's deadlist or a previous deadlist).
 *
 * space freed                        [---------------------]
 * snapshots                      ---O-------O--------O-------O------
 *                                        oldsnap            new
 */
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = new->ds_dir->dd_pool;

	*usedp = 0;
	*usedp += new->ds_phys->ds_used_bytes;
	*usedp -= oldsnap->ds_phys->ds_used_bytes;

	*compp = 0;
	*compp += new->ds_phys->ds_compressed_bytes;
	*compp -= oldsnap->ds_phys->ds_compressed_bytes;

	*uncompp = 0;
	*uncompp += new->ds_phys->ds_uncompressed_bytes;
	*uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	snapobj = new->ds_object;
	while (snapobj != oldsnap->ds_object) {
		dsl_dataset_t *snap;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
		if (err != 0)
			break;

		if (snap->ds_phys->ds_prev_snap_txg ==
		    oldsnap->ds_phys->ds_creation_txg) {
			/*
			 * The blocks in the deadlist can not be born after
			 * ds_prev_snap_txg, so get the whole deadlist space,
			 * which is more efficient (especially for old-format
			 * deadlists). Unfortunately the deadlist code
			 * doesn't have enough information to make this
			 * optimization itself.
			 */
			dsl_deadlist_space(&snap->ds_deadlist,
			    &used, &comp, &uncomp);
		} else {
			dsl_deadlist_space_range(&snap->ds_deadlist,
			    0, oldsnap->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
		}
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		/*
		 * If we get to the beginning of the chain of snapshots
		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
		 * was not a snapshot of/before new.
		 */
		snapobj = snap->ds_phys->ds_prev_snap_obj;
		dsl_dataset_rele(snap, FTAG);
		if (snapobj == 0) {
			err = EINVAL;
			break;
		}
	}
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}
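/*
 * Worked example for the calculation above (made-up numbers): oldsnap
 * references 10G and new references 12G, so the first component is
 * 12G - 10G = 2G. If 3G of blocks born before oldsnap's creation txg
 * were freed between the two snapshots (they appear on the deadlists
 * walked above, with birth <= oldsnap's ds_creation_txg), the written
 * space is 2G + 3G = 5G: new wrote 5G of fresh data while 3G of old
 * data went away.
 */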
/*
 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
 * lastsnap, and all snapshots in between are deleted.
 *
 * blocks that would be freed         [---------------------------]
 * snapshots                      ---O-------O--------O-------O--------O
 *                                       firstsnap        lastsnap
 *
 * This is the set of blocks that were born after the snap before firstsnap,
 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
 * We calculate this by iterating over the relevant deadlists (from the snap
 * after lastsnap, backward to the snap after firstsnap), summing up the
 * space on the deadlist that was born after the snap before firstsnap.
 */
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
    dsl_dataset_t *lastsnap,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;

	ASSERT(dsl_dataset_is_snapshot(firstsnap));
	ASSERT(dsl_dataset_is_snapshot(lastsnap));

	/*
	 * Check that the snapshots are in the same dsl_dir, and firstsnap
	 * is before lastsnap.
	 */
	if (firstsnap->ds_dir != lastsnap->ds_dir ||
	    firstsnap->ds_phys->ds_creation_txg >
	    lastsnap->ds_phys->ds_creation_txg)
		return (EINVAL);

	*usedp = *compp = *uncompp = 0;

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	snapobj = lastsnap->ds_phys->ds_next_snap_obj;
	while (snapobj != firstsnap->ds_object) {
		dsl_dataset_t *ds;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
		if (err != 0)
			break;

		dsl_deadlist_space_range(&ds->ds_deadlist,
		    firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		snapobj = ds->ds_phys->ds_prev_snap_obj;
		ASSERT3U(snapobj, !=, 0);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}
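/*
 * Worked example for the calculation above (made-up numbers): deleting
 * firstsnap..lastsnap frees exactly the blocks born after the snapshot
 * preceding firstsnap that died by the snapshot following lastsnap. If
 * the deadlists walked above hold 4G, 1G, and 2G of such blocks, the
 * caller sees *usedp = 7G reclaimable.
 */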
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_snapshots_destroy_nvl);
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_drop_ref);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_make_exclusive);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_destroy);
EXPORT_SYMBOL(dsl_dataset_destroy_check);
EXPORT_SYMBOL(dsl_dataset_destroy_sync);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_rename);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_clone_swap);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_set_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_lastsnap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_block_freeable);
EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_set_quota);
EXPORT_SYMBOL(dsl_dataset_set_quota_sync);
EXPORT_SYMBOL(dsl_dataset_set_reservation);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
#endif