/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 RackTop Systems.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
/*
 * The SPA supports block sizes up to 16MB.  However, very large blocks
 * can have an impact on i/o latency (e.g. tying up a spinning disk for
 * ~300ms), and also potentially on the memory allocator.  Therefore,
 * we do not allow the recordsize to be set larger than zfs_max_recordsize
 * (default 1MB).  Larger blocks can be created by changing this tunable,
 * and pools with larger blocks can always be imported and used, regardless
 * of this setting.
 */
int zfs_max_recordsize = 1 * 1024 * 1024;
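/*
 * Illustrative note (not part of the original source): with the default
 * above, "zfs set recordsize=..." is capped at 1M.  An administrator who
 * raises this tunable to, say, 16 * 1024 * 1024 can then set a 16M
 * recordsize; lowering it again does not affect blocks already written,
 * and such pools remain importable, as the comment above states.
 */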
#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	dsl_dataset_phys_t *ds_phys;
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	ds_phys = dsl_dataset_phys(ds);
	old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
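/*
 * Worked example (illustrative, not from the original source): with
 * ds_reserved = 10G and ds_unique_bytes = 8G, a delta of +1G gives
 * old_bytes = MAX(8G, 10G) = 10G and new_bytes = MAX(9G, 10G) = 10G,
 * so parent_delta() returns 0: the refreservation has already charged
 * that space to our ancestors.  Once unique bytes exceed the
 * reservation, the full delta is propagated to the dsl_dir layer again.
 */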
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used, compressed, uncompressed;
	int64_t delta;

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
	if (ds == NULL) {
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    used, compressed, uncompressed);
		return;
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes += used;
	dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
	dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
	dsl_dataset_phys(ds)->ds_unique_bytes += used;
	if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_BLOCKS] =
		    B_TRUE;
	}
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
141 dsl_dataset_block_kill(dsl_dataset_t
*ds
, const blkptr_t
*bp
, dmu_tx_t
*tx
,
144 int used
= bp_get_dsize_sync(tx
->tx_pool
->dp_spa
, bp
);
145 int compressed
= BP_GET_PSIZE(bp
);
146 int uncompressed
= BP_GET_UCSIZE(bp
);
151 ASSERT(dmu_tx_is_syncing(tx
));
152 ASSERT(bp
->blk_birth
<= tx
->tx_txg
);
155 dsl_free(tx
->tx_pool
, tx
->tx_txg
, bp
);
156 dsl_pool_mos_diduse_space(tx
->tx_pool
,
157 -used
, -compressed
, -uncompressed
);
160 ASSERT3P(tx
->tx_pool
, ==, ds
->ds_dir
->dd_pool
);
162 ASSERT(!ds
->ds_is_snapshot
);
163 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
165 if (bp
->blk_birth
> dsl_dataset_phys(ds
)->ds_prev_snap_txg
) {
168 dprintf_bp(bp
, "freeing ds=%llu", ds
->ds_object
);
169 dsl_free(tx
->tx_pool
, tx
->tx_txg
, bp
);
171 mutex_enter(&ds
->ds_lock
);
172 ASSERT(dsl_dataset_phys(ds
)->ds_unique_bytes
>= used
||
173 !DS_UNIQUE_IS_ACCURATE(ds
));
174 delta
= parent_delta(ds
, -used
);
175 dsl_dataset_phys(ds
)->ds_unique_bytes
-= used
;
176 mutex_exit(&ds
->ds_lock
);
177 dsl_dir_diduse_space(ds
->ds_dir
, DD_USED_HEAD
,
178 delta
, -compressed
, -uncompressed
, tx
);
179 dsl_dir_transfer_space(ds
->ds_dir
, -used
- delta
,
180 DD_USED_REFRSRV
, DD_USED_HEAD
, tx
);
182 dprintf_bp(bp
, "putting on dead list: %s", "");
185 * We are here as part of zio's write done callback,
186 * which means we're a zio interrupt thread. We can't
187 * call dsl_deadlist_insert() now because it may block
188 * waiting for I/O. Instead, put bp on the deferred
189 * queue and let dsl_pool_sync() finish the job.
191 bplist_append(&ds
->ds_pending_deadlist
, bp
);
193 dsl_deadlist_insert(&ds
->ds_deadlist
, bp
, tx
);
195 ASSERT3U(ds
->ds_prev
->ds_object
, ==,
196 dsl_dataset_phys(ds
)->ds_prev_snap_obj
);
197 ASSERT(dsl_dataset_phys(ds
->ds_prev
)->ds_num_children
> 0);
198 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
199 if (dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
==
200 ds
->ds_object
&& bp
->blk_birth
>
201 dsl_dataset_phys(ds
->ds_prev
)->ds_prev_snap_txg
) {
202 dmu_buf_will_dirty(ds
->ds_prev
->ds_dbuf
, tx
);
203 mutex_enter(&ds
->ds_prev
->ds_lock
);
204 dsl_dataset_phys(ds
->ds_prev
)->ds_unique_bytes
+= used
;
205 mutex_exit(&ds
->ds_prev
->ds_lock
);
207 if (bp
->blk_birth
> ds
->ds_dir
->dd_origin_txg
) {
208 dsl_dir_transfer_space(ds
->ds_dir
, used
,
209 DD_USED_HEAD
, DD_USED_SNAP
, tx
);
212 mutex_enter(&ds
->ds_lock
);
213 ASSERT3U(dsl_dataset_phys(ds
)->ds_referenced_bytes
, >=, used
);
214 dsl_dataset_phys(ds
)->ds_referenced_bytes
-= used
;
215 ASSERT3U(dsl_dataset_phys(ds
)->ds_compressed_bytes
, >=, compressed
);
216 dsl_dataset_phys(ds
)->ds_compressed_bytes
-= compressed
;
217 ASSERT3U(dsl_dataset_phys(ds
)->ds_uncompressed_bytes
, >=, uncompressed
);
218 dsl_dataset_phys(ds
)->ds_uncompressed_bytes
-= uncompressed
;
219 mutex_exit(&ds
->ds_lock
);
225 dsl_dataset_prev_snap_txg(dsl_dataset_t
*ds
)
227 uint64_t trysnap
= 0;
232 * The snapshot creation could fail, but that would cause an
233 * incorrect FALSE return, which would only result in an
234 * overestimation of the amount of space that an operation would
235 * consume, which is OK.
237 * There's also a small window where we could miss a pending
238 * snapshot, because we could set the sync task in the quiescing
239 * phase. So this should only be used as a guess.
241 if (ds
->ds_trysnap_txg
>
242 spa_last_synced_txg(ds
->ds_dir
->dd_pool
->dp_spa
))
243 trysnap
= ds
->ds_trysnap_txg
;
244 return (MAX(dsl_dataset_phys(ds
)->ds_prev_snap_txg
, trysnap
));
248 dsl_dataset_block_freeable(dsl_dataset_t
*ds
, const blkptr_t
*bp
,
251 if (blk_birth
<= dsl_dataset_prev_snap_txg(ds
) ||
252 (bp
!= NULL
&& BP_IS_HOLE(bp
)))
255 ddt_prefetch(dsl_dataset_get_spa(ds
), bp
);
261 dsl_dataset_evict(void *dbu
)
263 dsl_dataset_t
*ds
= dbu
;
265 ASSERT(ds
->ds_owner
== NULL
);
269 unique_remove(ds
->ds_fsid_guid
);
271 if (ds
->ds_objset
!= NULL
)
272 dmu_objset_evict(ds
->ds_objset
);
275 dsl_dataset_rele(ds
->ds_prev
, ds
);
279 bplist_destroy(&ds
->ds_pending_deadlist
);
280 if (ds
->ds_deadlist
.dl_os
!= NULL
)
281 dsl_deadlist_close(&ds
->ds_deadlist
);
283 dsl_dir_async_rele(ds
->ds_dir
, ds
);
285 ASSERT(!list_link_active(&ds
->ds_synced_link
));
287 mutex_destroy(&ds
->ds_lock
);
288 mutex_destroy(&ds
->ds_opening_lock
);
289 mutex_destroy(&ds
->ds_sendstream_lock
);
290 refcount_destroy(&ds
->ds_longholds
);
292 kmem_free(ds
, sizeof (dsl_dataset_t
));
296 dsl_dataset_get_snapname(dsl_dataset_t
*ds
)
298 dsl_dataset_phys_t
*headphys
;
301 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
302 objset_t
*mos
= dp
->dp_meta_objset
;
304 if (ds
->ds_snapname
[0])
306 if (dsl_dataset_phys(ds
)->ds_next_snap_obj
== 0)
309 err
= dmu_bonus_hold(mos
, dsl_dir_phys(ds
->ds_dir
)->dd_head_dataset_obj
,
313 headphys
= headdbuf
->db_data
;
314 err
= zap_value_search(dp
->dp_meta_objset
,
315 headphys
->ds_snapnames_zapobj
, ds
->ds_object
, 0, ds
->ds_snapname
);
316 if (err
!= 0 && zfs_recover
== B_TRUE
) {
318 (void) snprintf(ds
->ds_snapname
, sizeof (ds
->ds_snapname
),
319 "SNAPOBJ=%llu-ERR=%d",
320 (unsigned long long)ds
->ds_object
, err
);
322 dmu_buf_rele(headdbuf
, FTAG
);
327 dsl_dataset_snap_lookup(dsl_dataset_t
*ds
, const char *name
, uint64_t *value
)
329 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
330 uint64_t snapobj
= dsl_dataset_phys(ds
)->ds_snapnames_zapobj
;
334 if (dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_CI_DATASET
)
339 err
= zap_lookup_norm(mos
, snapobj
, name
, 8, 1,
340 value
, mt
, NULL
, 0, NULL
);
341 if (err
== ENOTSUP
&& mt
== MT_FIRST
)
342 err
= zap_lookup(mos
, snapobj
, name
, 8, 1, value
);
347 dsl_dataset_snap_remove(dsl_dataset_t
*ds
, const char *name
, dmu_tx_t
*tx
,
350 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
351 uint64_t snapobj
= dsl_dataset_phys(ds
)->ds_snapnames_zapobj
;
355 dsl_dir_snap_cmtime_update(ds
->ds_dir
);
357 if (dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_CI_DATASET
)
362 err
= zap_remove_norm(mos
, snapobj
, name
, mt
, tx
);
363 if (err
== ENOTSUP
&& mt
== MT_FIRST
)
364 err
= zap_remove(mos
, snapobj
, name
, tx
);
366 if (err
== 0 && adj_cnt
)
367 dsl_fs_ss_count_adjust(ds
->ds_dir
, -1,
368 DD_FIELD_SNAPSHOT_COUNT
, tx
);
374 dsl_dataset_try_add_ref(dsl_pool_t
*dp
, dsl_dataset_t
*ds
, void *tag
)
376 dmu_buf_t
*dbuf
= ds
->ds_dbuf
;
377 boolean_t result
= B_FALSE
;
379 if (dbuf
!= NULL
&& dmu_buf_try_add_ref(dbuf
, dp
->dp_meta_objset
,
380 ds
->ds_object
, DMU_BONUS_BLKID
, tag
)) {
382 if (ds
== dmu_buf_get_user(dbuf
))
385 dmu_buf_rele(dbuf
, tag
);
392 dsl_dataset_hold_obj(dsl_pool_t
*dp
, uint64_t dsobj
, void *tag
,
395 objset_t
*mos
= dp
->dp_meta_objset
;
399 dmu_object_info_t doi
;
401 ASSERT(dsl_pool_config_held(dp
));
403 err
= dmu_bonus_hold(mos
, dsobj
, tag
, &dbuf
);
407 /* Make sure dsobj has the correct object type. */
408 dmu_object_info_from_db(dbuf
, &doi
);
409 if (doi
.doi_bonus_type
!= DMU_OT_DSL_DATASET
) {
410 dmu_buf_rele(dbuf
, tag
);
411 return (SET_ERROR(EINVAL
));
414 ds
= dmu_buf_get_user(dbuf
);
416 dsl_dataset_t
*winner
= NULL
;
418 ds
= kmem_zalloc(sizeof (dsl_dataset_t
), KM_SLEEP
);
420 ds
->ds_object
= dsobj
;
421 ds
->ds_is_snapshot
= dsl_dataset_phys(ds
)->ds_num_children
!= 0;
422 list_link_init(&ds
->ds_synced_link
);
424 mutex_init(&ds
->ds_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
425 mutex_init(&ds
->ds_opening_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
426 mutex_init(&ds
->ds_sendstream_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
427 refcount_create(&ds
->ds_longholds
);
429 bplist_create(&ds
->ds_pending_deadlist
);
430 dsl_deadlist_open(&ds
->ds_deadlist
,
431 mos
, dsl_dataset_phys(ds
)->ds_deadlist_obj
);
433 list_create(&ds
->ds_sendstreams
, sizeof (dmu_sendarg_t
),
434 offsetof(dmu_sendarg_t
, dsa_link
));
436 if (doi
.doi_type
== DMU_OTN_ZAP_METADATA
) {
439 for (f
= 0; f
< SPA_FEATURES
; f
++) {
440 if (!(spa_feature_table
[f
].fi_flags
&
441 ZFEATURE_FLAG_PER_DATASET
))
443 err
= zap_contains(mos
, dsobj
,
444 spa_feature_table
[f
].fi_guid
);
446 ds
->ds_feature_inuse
[f
] = B_TRUE
;
448 ASSERT3U(err
, ==, ENOENT
);
454 err
= dsl_dir_hold_obj(dp
,
455 dsl_dataset_phys(ds
)->ds_dir_obj
, NULL
, ds
, &ds
->ds_dir
);
457 mutex_destroy(&ds
->ds_lock
);
458 mutex_destroy(&ds
->ds_opening_lock
);
459 mutex_destroy(&ds
->ds_sendstream_lock
);
460 refcount_destroy(&ds
->ds_longholds
);
461 bplist_destroy(&ds
->ds_pending_deadlist
);
462 dsl_deadlist_close(&ds
->ds_deadlist
);
463 kmem_free(ds
, sizeof (dsl_dataset_t
));
464 dmu_buf_rele(dbuf
, tag
);
468 if (!ds
->ds_is_snapshot
) {
469 ds
->ds_snapname
[0] = '\0';
470 if (dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0) {
471 err
= dsl_dataset_hold_obj(dp
,
472 dsl_dataset_phys(ds
)->ds_prev_snap_obj
,
475 if (doi
.doi_type
== DMU_OTN_ZAP_METADATA
) {
476 int zaperr
= zap_lookup(mos
, ds
->ds_object
,
477 DS_FIELD_BOOKMARK_NAMES
,
478 sizeof (ds
->ds_bookmarks
), 1,
480 if (zaperr
!= ENOENT
)
484 if (zfs_flags
& ZFS_DEBUG_SNAPNAMES
)
485 err
= dsl_dataset_get_snapname(ds
);
487 dsl_dataset_phys(ds
)->ds_userrefs_obj
!= 0) {
489 ds
->ds_dir
->dd_pool
->dp_meta_objset
,
490 dsl_dataset_phys(ds
)->ds_userrefs_obj
,
495 if (err
== 0 && !ds
->ds_is_snapshot
) {
496 err
= dsl_prop_get_int_ds(ds
,
497 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
),
500 err
= dsl_prop_get_int_ds(ds
,
501 zfs_prop_to_name(ZFS_PROP_REFQUOTA
),
505 ds
->ds_reserved
= ds
->ds_quota
= 0;
508 dmu_buf_init_user(&ds
->ds_dbu
, dsl_dataset_evict
, &ds
->ds_dbuf
);
510 winner
= dmu_buf_set_user_ie(dbuf
, &ds
->ds_dbu
);
512 if (err
!= 0 || winner
!= NULL
) {
513 bplist_destroy(&ds
->ds_pending_deadlist
);
514 dsl_deadlist_close(&ds
->ds_deadlist
);
516 dsl_dataset_rele(ds
->ds_prev
, ds
);
517 dsl_dir_rele(ds
->ds_dir
, ds
);
518 mutex_destroy(&ds
->ds_lock
);
519 mutex_destroy(&ds
->ds_opening_lock
);
520 mutex_destroy(&ds
->ds_sendstream_lock
);
521 refcount_destroy(&ds
->ds_longholds
);
522 kmem_free(ds
, sizeof (dsl_dataset_t
));
524 dmu_buf_rele(dbuf
, tag
);
530 unique_insert(dsl_dataset_phys(ds
)->ds_fsid_guid
);
533 ASSERT3P(ds
->ds_dbuf
, ==, dbuf
);
534 ASSERT3P(dsl_dataset_phys(ds
), ==, dbuf
->db_data
);
535 ASSERT(dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0 ||
536 spa_version(dp
->dp_spa
) < SPA_VERSION_ORIGIN
||
537 dp
->dp_origin_snap
== NULL
|| ds
== dp
->dp_origin_snap
);
543 dsl_dataset_hold(dsl_pool_t
*dp
, const char *name
,
544 void *tag
, dsl_dataset_t
**dsp
)
547 const char *snapname
;
552 err
= dsl_dir_hold(dp
, name
, FTAG
, &dd
, &snapname
);
556 ASSERT(dsl_pool_config_held(dp
));
557 obj
= dsl_dir_phys(dd
)->dd_head_dataset_obj
;
559 err
= dsl_dataset_hold_obj(dp
, obj
, tag
, &ds
);
561 err
= SET_ERROR(ENOENT
);
563 /* we may be looking for a snapshot */
564 if (err
== 0 && snapname
!= NULL
) {
565 dsl_dataset_t
*snap_ds
;
567 if (*snapname
++ != '@') {
568 dsl_dataset_rele(ds
, tag
);
569 dsl_dir_rele(dd
, FTAG
);
570 return (SET_ERROR(ENOENT
));
573 dprintf("looking for snapshot '%s'\n", snapname
);
574 err
= dsl_dataset_snap_lookup(ds
, snapname
, &obj
);
576 err
= dsl_dataset_hold_obj(dp
, obj
, tag
, &snap_ds
);
577 dsl_dataset_rele(ds
, tag
);
580 mutex_enter(&snap_ds
->ds_lock
);
581 if (snap_ds
->ds_snapname
[0] == 0)
582 (void) strlcpy(snap_ds
->ds_snapname
, snapname
,
583 sizeof (snap_ds
->ds_snapname
));
584 mutex_exit(&snap_ds
->ds_lock
);
590 dsl_dir_rele(dd
, FTAG
);
595 dsl_dataset_own_obj(dsl_pool_t
*dp
, uint64_t dsobj
,
596 void *tag
, dsl_dataset_t
**dsp
)
598 int err
= dsl_dataset_hold_obj(dp
, dsobj
, tag
, dsp
);
601 if (!dsl_dataset_tryown(*dsp
, tag
)) {
602 dsl_dataset_rele(*dsp
, tag
);
604 return (SET_ERROR(EBUSY
));
610 dsl_dataset_own(dsl_pool_t
*dp
, const char *name
,
611 void *tag
, dsl_dataset_t
**dsp
)
613 int err
= dsl_dataset_hold(dp
, name
, tag
, dsp
);
616 if (!dsl_dataset_tryown(*dsp
, tag
)) {
617 dsl_dataset_rele(*dsp
, tag
);
618 return (SET_ERROR(EBUSY
));
/*
 * See the comment above dsl_pool_hold() for details.  In summary, a long
 * hold is used to prevent destruction of a dataset while the pool hold
 * is dropped, allowing other concurrent operations (e.g. spa_sync()).
 *
 * The dataset and pool must be held when this function is called.  After it
 * is called, the pool hold may be released while the dataset is still held
 * and accessed.
 */
void
dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
{
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
	(void) refcount_add(&ds->ds_longholds, tag);
}
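/*
 * Illustrative usage sketch (not from the original source): a caller that
 * needs the dataset to survive dropping the pool hold does roughly:
 *
 *	dsl_pool_hold(name, FTAG, &dp);
 *	dsl_dataset_hold(dp, name, FTAG, &ds);
 *	dsl_dataset_long_hold(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 *	... long-running work; the dataset cannot be destroyed ...
 *	dsl_pool_hold(name, FTAG, &dp);
 *	dsl_dataset_long_rele(ds, FTAG);
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 */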
640 dsl_dataset_long_rele(dsl_dataset_t
*ds
, void *tag
)
642 (void) refcount_remove(&ds
->ds_longholds
, tag
);
645 /* Return B_TRUE if there are any long holds on this dataset. */
647 dsl_dataset_long_held(dsl_dataset_t
*ds
)
649 return (!refcount_is_zero(&ds
->ds_longholds
));
653 dsl_dataset_name(dsl_dataset_t
*ds
, char *name
)
656 (void) strcpy(name
, "mos");
658 dsl_dir_name(ds
->ds_dir
, name
);
659 VERIFY0(dsl_dataset_get_snapname(ds
));
660 if (ds
->ds_snapname
[0]) {
661 (void) strcat(name
, "@");
663 * We use a "recursive" mutex so that we
664 * can call dprintf_ds() with ds_lock held.
666 if (!MUTEX_HELD(&ds
->ds_lock
)) {
667 mutex_enter(&ds
->ds_lock
);
668 (void) strcat(name
, ds
->ds_snapname
);
669 mutex_exit(&ds
->ds_lock
);
671 (void) strcat(name
, ds
->ds_snapname
);
678 dsl_dataset_rele(dsl_dataset_t
*ds
, void *tag
)
680 dmu_buf_rele(ds
->ds_dbuf
, tag
);
684 dsl_dataset_disown(dsl_dataset_t
*ds
, void *tag
)
686 ASSERT3P(ds
->ds_owner
, ==, tag
);
687 ASSERT(ds
->ds_dbuf
!= NULL
);
689 mutex_enter(&ds
->ds_lock
);
691 mutex_exit(&ds
->ds_lock
);
692 dsl_dataset_long_rele(ds
, tag
);
693 dsl_dataset_rele(ds
, tag
);
697 dsl_dataset_tryown(dsl_dataset_t
*ds
, void *tag
)
699 boolean_t gotit
= FALSE
;
701 mutex_enter(&ds
->ds_lock
);
702 if (ds
->ds_owner
== NULL
&& !DS_IS_INCONSISTENT(ds
)) {
704 dsl_dataset_long_hold(ds
, tag
);
707 mutex_exit(&ds
->ds_lock
);
712 dsl_dataset_activate_feature(uint64_t dsobj
, spa_feature_t f
, dmu_tx_t
*tx
)
714 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
715 objset_t
*mos
= dmu_tx_pool(tx
)->dp_meta_objset
;
718 VERIFY(spa_feature_table
[f
].fi_flags
& ZFEATURE_FLAG_PER_DATASET
);
720 spa_feature_incr(spa
, f
, tx
);
721 dmu_object_zapify(mos
, dsobj
, DMU_OT_DSL_DATASET
, tx
);
723 VERIFY0(zap_add(mos
, dsobj
, spa_feature_table
[f
].fi_guid
,
724 sizeof (zero
), 1, &zero
, tx
));
728 dsl_dataset_deactivate_feature(uint64_t dsobj
, spa_feature_t f
, dmu_tx_t
*tx
)
730 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
731 objset_t
*mos
= dmu_tx_pool(tx
)->dp_meta_objset
;
733 VERIFY(spa_feature_table
[f
].fi_flags
& ZFEATURE_FLAG_PER_DATASET
);
735 VERIFY0(zap_remove(mos
, dsobj
, spa_feature_table
[f
].fi_guid
, tx
));
736 spa_feature_decr(spa
, f
, tx
);
740 dsl_dataset_create_sync_dd(dsl_dir_t
*dd
, dsl_dataset_t
*origin
,
741 uint64_t flags
, dmu_tx_t
*tx
)
743 dsl_pool_t
*dp
= dd
->dd_pool
;
745 dsl_dataset_phys_t
*dsphys
;
747 objset_t
*mos
= dp
->dp_meta_objset
;
750 origin
= dp
->dp_origin_snap
;
752 ASSERT(origin
== NULL
|| origin
->ds_dir
->dd_pool
== dp
);
753 ASSERT(origin
== NULL
|| dsl_dataset_phys(origin
)->ds_num_children
> 0);
754 ASSERT(dmu_tx_is_syncing(tx
));
755 ASSERT(dsl_dir_phys(dd
)->dd_head_dataset_obj
== 0);
757 dsobj
= dmu_object_alloc(mos
, DMU_OT_DSL_DATASET
, 0,
758 DMU_OT_DSL_DATASET
, sizeof (dsl_dataset_phys_t
), tx
);
759 VERIFY0(dmu_bonus_hold(mos
, dsobj
, FTAG
, &dbuf
));
760 dmu_buf_will_dirty(dbuf
, tx
);
761 dsphys
= dbuf
->db_data
;
762 bzero(dsphys
, sizeof (dsl_dataset_phys_t
));
763 dsphys
->ds_dir_obj
= dd
->dd_object
;
764 dsphys
->ds_flags
= flags
;
765 dsphys
->ds_fsid_guid
= unique_create();
766 (void) random_get_pseudo_bytes((void*)&dsphys
->ds_guid
,
767 sizeof (dsphys
->ds_guid
));
768 dsphys
->ds_snapnames_zapobj
=
769 zap_create_norm(mos
, U8_TEXTPREP_TOUPPER
, DMU_OT_DSL_DS_SNAP_MAP
,
771 dsphys
->ds_creation_time
= gethrestime_sec();
772 dsphys
->ds_creation_txg
= tx
->tx_txg
== TXG_INITIAL
? 1 : tx
->tx_txg
;
774 if (origin
== NULL
) {
775 dsphys
->ds_deadlist_obj
= dsl_deadlist_alloc(mos
, tx
);
778 dsl_dataset_t
*ohds
; /* head of the origin snapshot */
780 dsphys
->ds_prev_snap_obj
= origin
->ds_object
;
781 dsphys
->ds_prev_snap_txg
=
782 dsl_dataset_phys(origin
)->ds_creation_txg
;
783 dsphys
->ds_referenced_bytes
=
784 dsl_dataset_phys(origin
)->ds_referenced_bytes
;
785 dsphys
->ds_compressed_bytes
=
786 dsl_dataset_phys(origin
)->ds_compressed_bytes
;
787 dsphys
->ds_uncompressed_bytes
=
788 dsl_dataset_phys(origin
)->ds_uncompressed_bytes
;
789 dsphys
->ds_bp
= dsl_dataset_phys(origin
)->ds_bp
;
792 * Inherit flags that describe the dataset's contents
793 * (INCONSISTENT) or properties (Case Insensitive).
795 dsphys
->ds_flags
|= dsl_dataset_phys(origin
)->ds_flags
&
796 (DS_FLAG_INCONSISTENT
| DS_FLAG_CI_DATASET
);
798 for (f
= 0; f
< SPA_FEATURES
; f
++) {
799 if (origin
->ds_feature_inuse
[f
])
800 dsl_dataset_activate_feature(dsobj
, f
, tx
);
803 dmu_buf_will_dirty(origin
->ds_dbuf
, tx
);
804 dsl_dataset_phys(origin
)->ds_num_children
++;
806 VERIFY0(dsl_dataset_hold_obj(dp
,
807 dsl_dir_phys(origin
->ds_dir
)->dd_head_dataset_obj
,
809 dsphys
->ds_deadlist_obj
= dsl_deadlist_clone(&ohds
->ds_deadlist
,
810 dsphys
->ds_prev_snap_txg
, dsphys
->ds_prev_snap_obj
, tx
);
811 dsl_dataset_rele(ohds
, FTAG
);
813 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_NEXT_CLONES
) {
814 if (dsl_dataset_phys(origin
)->ds_next_clones_obj
== 0) {
815 dsl_dataset_phys(origin
)->ds_next_clones_obj
=
817 DMU_OT_NEXT_CLONES
, DMU_OT_NONE
, 0, tx
);
819 VERIFY0(zap_add_int(mos
,
820 dsl_dataset_phys(origin
)->ds_next_clones_obj
,
824 dmu_buf_will_dirty(dd
->dd_dbuf
, tx
);
825 dsl_dir_phys(dd
)->dd_origin_obj
= origin
->ds_object
;
826 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_DIR_CLONES
) {
827 if (dsl_dir_phys(origin
->ds_dir
)->dd_clones
== 0) {
828 dmu_buf_will_dirty(origin
->ds_dir
->dd_dbuf
, tx
);
829 dsl_dir_phys(origin
->ds_dir
)->dd_clones
=
831 DMU_OT_DSL_CLONES
, DMU_OT_NONE
, 0, tx
);
833 VERIFY0(zap_add_int(mos
,
834 dsl_dir_phys(origin
->ds_dir
)->dd_clones
,
839 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_UNIQUE_ACCURATE
)
840 dsphys
->ds_flags
|= DS_FLAG_UNIQUE_ACCURATE
;
842 dmu_buf_rele(dbuf
, FTAG
);
844 dmu_buf_will_dirty(dd
->dd_dbuf
, tx
);
845 dsl_dir_phys(dd
)->dd_head_dataset_obj
= dsobj
;
851 dsl_dataset_zero_zil(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
855 VERIFY0(dmu_objset_from_ds(ds
, &os
));
856 bzero(&os
->os_zil_header
, sizeof (os
->os_zil_header
));
857 dsl_dataset_dirty(ds
, tx
);
861 dsl_dataset_create_sync(dsl_dir_t
*pdd
, const char *lastname
,
862 dsl_dataset_t
*origin
, uint64_t flags
, cred_t
*cr
, dmu_tx_t
*tx
)
864 dsl_pool_t
*dp
= pdd
->dd_pool
;
865 uint64_t dsobj
, ddobj
;
868 ASSERT(dmu_tx_is_syncing(tx
));
869 ASSERT(lastname
[0] != '@');
871 ddobj
= dsl_dir_create_sync(dp
, pdd
, lastname
, tx
);
872 VERIFY0(dsl_dir_hold_obj(dp
, ddobj
, lastname
, FTAG
, &dd
));
874 dsobj
= dsl_dataset_create_sync_dd(dd
, origin
,
875 flags
& ~DS_CREATE_FLAG_NODIRTY
, tx
);
877 dsl_deleg_set_create_perms(dd
, tx
, cr
);
880 * Since we're creating a new node we know it's a leaf, so we can
881 * initialize the counts if the limit feature is active.
883 if (spa_feature_is_active(dp
->dp_spa
, SPA_FEATURE_FS_SS_LIMIT
)) {
885 objset_t
*os
= dd
->dd_pool
->dp_meta_objset
;
887 dsl_dir_zapify(dd
, tx
);
888 VERIFY0(zap_add(os
, dd
->dd_object
, DD_FIELD_FILESYSTEM_COUNT
,
889 sizeof (cnt
), 1, &cnt
, tx
));
890 VERIFY0(zap_add(os
, dd
->dd_object
, DD_FIELD_SNAPSHOT_COUNT
,
891 sizeof (cnt
), 1, &cnt
, tx
));
894 dsl_dir_rele(dd
, FTAG
);
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
900 if (origin
!= NULL
&& !(flags
& DS_CREATE_FLAG_NODIRTY
)) {
903 VERIFY0(dsl_dataset_hold_obj(dp
, dsobj
, FTAG
, &ds
));
904 dsl_dataset_zero_zil(ds
, tx
);
905 dsl_dataset_rele(ds
, FTAG
);
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
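/*
 * Worked example (illustrative, not from the original source): if the head
 * references 100G, its most recent snapshot references 80G, and 30G of the
 * snapshot's blocks have since been freed from the head (they now live on
 * the head's deadlist), then the snapshot still shares 80G - 30G = 50G with
 * the head, so the head's unique space is 100G - 50G = 50G, matching the
 * calculation below: referenced - (mrs_used - dlused).
 */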
920 dsl_dataset_recalc_head_uniq(dsl_dataset_t
*ds
)
923 uint64_t dlused
, dlcomp
, dluncomp
;
925 ASSERT(!ds
->ds_is_snapshot
);
927 if (dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0)
928 mrs_used
= dsl_dataset_phys(ds
->ds_prev
)->ds_referenced_bytes
;
932 dsl_deadlist_space(&ds
->ds_deadlist
, &dlused
, &dlcomp
, &dluncomp
);
934 ASSERT3U(dlused
, <=, mrs_used
);
935 dsl_dataset_phys(ds
)->ds_unique_bytes
=
936 dsl_dataset_phys(ds
)->ds_referenced_bytes
- (mrs_used
- dlused
);
938 if (spa_version(ds
->ds_dir
->dd_pool
->dp_spa
) >=
939 SPA_VERSION_UNIQUE_ACCURATE
)
940 dsl_dataset_phys(ds
)->ds_flags
|= DS_FLAG_UNIQUE_ACCURATE
;
944 dsl_dataset_remove_from_next_clones(dsl_dataset_t
*ds
, uint64_t obj
,
947 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
949 ASSERTV(uint64_t count
);
951 ASSERT(dsl_dataset_phys(ds
)->ds_num_children
>= 2);
952 err
= zap_remove_int(mos
, dsl_dataset_phys(ds
)->ds_next_clones_obj
,
955 * The err should not be ENOENT, but a bug in a previous version
956 * of the code could cause upgrade_clones_cb() to not set
957 * ds_next_snap_obj when it should, leading to a missing entry.
958 * If we knew that the pool was created after
959 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
960 * ENOENT. However, at least we can check that we don't have
961 * too many entries in the next_clones_obj even after failing to
966 ASSERT0(zap_count(mos
, dsl_dataset_phys(ds
)->ds_next_clones_obj
,
968 ASSERT3U(count
, <=, dsl_dataset_phys(ds
)->ds_num_children
- 2);
973 dsl_dataset_get_blkptr(dsl_dataset_t
*ds
)
975 return (&dsl_dataset_phys(ds
)->ds_bp
);
979 dsl_dataset_set_blkptr(dsl_dataset_t
*ds
, blkptr_t
*bp
, dmu_tx_t
*tx
)
981 ASSERT(dmu_tx_is_syncing(tx
));
982 /* If it's the meta-objset, set dp_meta_rootbp */
984 tx
->tx_pool
->dp_meta_rootbp
= *bp
;
986 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
987 dsl_dataset_phys(ds
)->ds_bp
= *bp
;
992 dsl_dataset_get_spa(dsl_dataset_t
*ds
)
994 return (ds
->ds_dir
->dd_pool
->dp_spa
);
998 dsl_dataset_dirty(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
1002 if (ds
== NULL
) /* this is the meta-objset */
1005 ASSERT(ds
->ds_objset
!= NULL
);
1007 if (dsl_dataset_phys(ds
)->ds_next_snap_obj
!= 0)
1008 panic("dirtying snapshot!");
1010 dp
= ds
->ds_dir
->dd_pool
;
1012 if (txg_list_add(&dp
->dp_dirty_datasets
, ds
, tx
->tx_txg
)) {
1013 /* up the hold count until we can be written out */
1014 dmu_buf_add_ref(ds
->ds_dbuf
, ds
);
1019 dsl_dataset_is_dirty(dsl_dataset_t
*ds
)
1023 for (t
= 0; t
< TXG_SIZE
; t
++) {
1024 if (txg_list_member(&ds
->ds_dir
->dd_pool
->dp_dirty_datasets
,
1032 dsl_dataset_snapshot_reserve_space(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
1036 if (!dmu_tx_is_syncing(tx
))
1040 * If there's an fs-only reservation, any blocks that might become
1041 * owned by the snapshot dataset must be accommodated by space
1042 * outside of the reservation.
1044 ASSERT(ds
->ds_reserved
== 0 || DS_UNIQUE_IS_ACCURATE(ds
));
1045 asize
= MIN(dsl_dataset_phys(ds
)->ds_unique_bytes
, ds
->ds_reserved
);
1046 if (asize
> dsl_dir_space_available(ds
->ds_dir
, NULL
, 0, TRUE
))
1047 return (SET_ERROR(ENOSPC
));
1050 * Propagate any reserved space for this snapshot to other
1051 * snapshot checks in this sync group.
1054 dsl_dir_willuse_space(ds
->ds_dir
, asize
, tx
);
1059 typedef struct dsl_dataset_snapshot_arg
{
1060 nvlist_t
*ddsa_snaps
;
1061 nvlist_t
*ddsa_props
;
1062 nvlist_t
*ddsa_errors
;
1064 } dsl_dataset_snapshot_arg_t
;
1067 dsl_dataset_snapshot_check_impl(dsl_dataset_t
*ds
, const char *snapname
,
1068 dmu_tx_t
*tx
, boolean_t recv
, uint64_t cnt
, cred_t
*cr
)
1073 ds
->ds_trysnap_txg
= tx
->tx_txg
;
1075 if (!dmu_tx_is_syncing(tx
))
1079 * We don't allow multiple snapshots of the same txg. If there
1080 * is already one, try again.
1082 if (dsl_dataset_phys(ds
)->ds_prev_snap_txg
>= tx
->tx_txg
)
1083 return (SET_ERROR(EAGAIN
));
1086 * Check for conflicting snapshot name.
1088 error
= dsl_dataset_snap_lookup(ds
, snapname
, &value
);
1090 return (SET_ERROR(EEXIST
));
1091 if (error
!= ENOENT
)
1095 * We don't allow taking snapshots of inconsistent datasets, such as
1096 * those into which we are currently receiving. However, if we are
1097 * creating this snapshot as part of a receive, this check will be
1098 * executed atomically with respect to the completion of the receive
1099 * itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
1100 * case we ignore this, knowing it will be fixed up for us shortly in
1101 * dmu_recv_end_sync().
1103 if (!recv
&& DS_IS_INCONSISTENT(ds
))
1104 return (SET_ERROR(EBUSY
));
1107 * Skip the check for temporary snapshots or if we have already checked
1108 * the counts in dsl_dataset_snapshot_check. This means we really only
1109 * check the count here when we're receiving a stream.
1111 if (cnt
!= 0 && cr
!= NULL
) {
1112 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, cnt
,
1113 ZFS_PROP_SNAPSHOT_LIMIT
, NULL
, cr
);
1118 error
= dsl_dataset_snapshot_reserve_space(ds
, tx
);
1126 dsl_dataset_snapshot_check(void *arg
, dmu_tx_t
*tx
)
1128 dsl_dataset_snapshot_arg_t
*ddsa
= arg
;
1129 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1134 * Pre-compute how many total new snapshots will be created for each
1135 * level in the tree and below. This is needed for validating the
1136 * snapshot limit when either taking a recursive snapshot or when
1137 * taking multiple snapshots.
1139 * The problem is that the counts are not actually adjusted when
1140 * we are checking, only when we finally sync. For a single snapshot,
1141 * this is easy, the count will increase by 1 at each node up the tree,
	 * but it's more complicated for the recursive/multiple snapshot case.
1144 * The dsl_fs_ss_limit_check function does recursively check the count
1145 * at each level up the tree but since it is validating each snapshot
1146 * independently we need to be sure that we are validating the complete
1147 * count for the entire set of snapshots. We do this by rolling up the
1148 * counts for each component of the name into an nvlist and then
1149 * checking each of those cases with the aggregated count.
1151 * This approach properly handles not only the recursive snapshot
1152 * case (where we get all of those on the ddsa_snaps list) but also
1153 * the sibling case (e.g. snapshot a/b and a/c so that we will also
1154 * validate the limit on 'a' using a count of 2).
1156 * We validate the snapshot names in the third loop and only report
1159 if (dmu_tx_is_syncing(tx
)) {
1161 nvlist_t
*cnt_track
= NULL
;
1162 cnt_track
= fnvlist_alloc();
1164 nm
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
1166 /* Rollup aggregated counts into the cnt_track list */
1167 for (pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, NULL
);
1169 pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, pair
)) {
1173 (void) strlcpy(nm
, nvpair_name(pair
), MAXPATHLEN
);
1174 pdelim
= strchr(nm
, '@');
1180 if (nvlist_lookup_uint64(cnt_track
, nm
,
1182 /* update existing entry */
1183 fnvlist_add_uint64(cnt_track
, nm
,
1187 fnvlist_add_uint64(cnt_track
, nm
, 1);
1190 pdelim
= strrchr(nm
, '/');
1193 } while (pdelim
!= NULL
);
1196 kmem_free(nm
, MAXPATHLEN
);
1198 /* Check aggregated counts at each level */
1199 for (pair
= nvlist_next_nvpair(cnt_track
, NULL
);
1200 pair
!= NULL
; pair
= nvlist_next_nvpair(cnt_track
, pair
)) {
1206 name
= nvpair_name(pair
);
1207 cnt
= fnvpair_value_uint64(pair
);
1210 error
= dsl_dataset_hold(dp
, name
, FTAG
, &ds
);
1212 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, cnt
,
1213 ZFS_PROP_SNAPSHOT_LIMIT
, NULL
,
1215 dsl_dataset_rele(ds
, FTAG
);
1219 if (ddsa
->ddsa_errors
!= NULL
)
1220 fnvlist_add_int32(ddsa
->ddsa_errors
,
1223 /* only report one error for this check */
1227 nvlist_free(cnt_track
);
1230 for (pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, NULL
);
1231 pair
!= NULL
; pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, pair
)) {
1235 char dsname
[MAXNAMELEN
];
1237 name
= nvpair_name(pair
);
1238 if (strlen(name
) >= MAXNAMELEN
)
1239 error
= SET_ERROR(ENAMETOOLONG
);
1241 atp
= strchr(name
, '@');
1243 error
= SET_ERROR(EINVAL
);
1245 (void) strlcpy(dsname
, name
, atp
- name
+ 1);
1248 error
= dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
);
1250 /* passing 0/NULL skips dsl_fs_ss_limit_check */
1251 error
= dsl_dataset_snapshot_check_impl(ds
,
1252 atp
+ 1, tx
, B_FALSE
, 0, NULL
);
1253 dsl_dataset_rele(ds
, FTAG
);
1257 if (ddsa
->ddsa_errors
!= NULL
) {
1258 fnvlist_add_int32(ddsa
->ddsa_errors
,
1269 dsl_dataset_snapshot_sync_impl(dsl_dataset_t
*ds
, const char *snapname
,
1272 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1274 dsl_dataset_phys_t
*dsphys
;
1275 uint64_t dsobj
, crtxg
;
1276 objset_t
*mos
= dp
->dp_meta_objset
;
1278 ASSERTV(static zil_header_t zero_zil
);
1279 ASSERTV(objset_t
*os
);
1281 ASSERT(RRW_WRITE_HELD(&dp
->dp_config_rwlock
));
1284 * If we are on an old pool, the zil must not be active, in which
1285 * case it will be zeroed. Usually zil_suspend() accomplishes this.
1287 ASSERT(spa_version(dmu_tx_pool(tx
)->dp_spa
) >= SPA_VERSION_FAST_SNAP
||
1288 dmu_objset_from_ds(ds
, &os
) != 0 ||
1289 bcmp(&os
->os_phys
->os_zil_header
, &zero_zil
,
1290 sizeof (zero_zil
)) == 0);
1292 dsl_fs_ss_count_adjust(ds
->ds_dir
, 1, DD_FIELD_SNAPSHOT_COUNT
, tx
);
1295 * The origin's ds_creation_txg has to be < TXG_INITIAL
1297 if (strcmp(snapname
, ORIGIN_DIR_NAME
) == 0)
1302 dsobj
= dmu_object_alloc(mos
, DMU_OT_DSL_DATASET
, 0,
1303 DMU_OT_DSL_DATASET
, sizeof (dsl_dataset_phys_t
), tx
);
1304 VERIFY0(dmu_bonus_hold(mos
, dsobj
, FTAG
, &dbuf
));
1305 dmu_buf_will_dirty(dbuf
, tx
);
1306 dsphys
= dbuf
->db_data
;
1307 bzero(dsphys
, sizeof (dsl_dataset_phys_t
));
1308 dsphys
->ds_dir_obj
= ds
->ds_dir
->dd_object
;
1309 dsphys
->ds_fsid_guid
= unique_create();
1310 (void) random_get_pseudo_bytes((void*)&dsphys
->ds_guid
,
1311 sizeof (dsphys
->ds_guid
));
1312 dsphys
->ds_prev_snap_obj
= dsl_dataset_phys(ds
)->ds_prev_snap_obj
;
1313 dsphys
->ds_prev_snap_txg
= dsl_dataset_phys(ds
)->ds_prev_snap_txg
;
1314 dsphys
->ds_next_snap_obj
= ds
->ds_object
;
1315 dsphys
->ds_num_children
= 1;
1316 dsphys
->ds_creation_time
= gethrestime_sec();
1317 dsphys
->ds_creation_txg
= crtxg
;
1318 dsphys
->ds_deadlist_obj
= dsl_dataset_phys(ds
)->ds_deadlist_obj
;
1319 dsphys
->ds_referenced_bytes
= dsl_dataset_phys(ds
)->ds_referenced_bytes
;
1320 dsphys
->ds_compressed_bytes
= dsl_dataset_phys(ds
)->ds_compressed_bytes
;
1321 dsphys
->ds_uncompressed_bytes
=
1322 dsl_dataset_phys(ds
)->ds_uncompressed_bytes
;
1323 dsphys
->ds_flags
= dsl_dataset_phys(ds
)->ds_flags
;
1324 dsphys
->ds_bp
= dsl_dataset_phys(ds
)->ds_bp
;
1325 dmu_buf_rele(dbuf
, FTAG
);
1327 for (f
= 0; f
< SPA_FEATURES
; f
++) {
1328 if (ds
->ds_feature_inuse
[f
])
1329 dsl_dataset_activate_feature(dsobj
, f
, tx
);
1332 ASSERT3U(ds
->ds_prev
!= 0, ==,
1333 dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0);
1335 uint64_t next_clones_obj
=
1336 dsl_dataset_phys(ds
->ds_prev
)->ds_next_clones_obj
;
1337 ASSERT(dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
==
1339 dsl_dataset_phys(ds
->ds_prev
)->ds_num_children
> 1);
1340 if (dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
==
1342 dmu_buf_will_dirty(ds
->ds_prev
->ds_dbuf
, tx
);
1343 ASSERT3U(dsl_dataset_phys(ds
)->ds_prev_snap_txg
, ==,
1344 dsl_dataset_phys(ds
->ds_prev
)->ds_creation_txg
);
1345 dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
= dsobj
;
1346 } else if (next_clones_obj
!= 0) {
1347 dsl_dataset_remove_from_next_clones(ds
->ds_prev
,
1348 dsphys
->ds_next_snap_obj
, tx
);
1349 VERIFY0(zap_add_int(mos
,
1350 next_clones_obj
, dsobj
, tx
));
1355 * If we have a reference-reservation on this dataset, we will
1356 * need to increase the amount of refreservation being charged
1357 * since our unique space is going to zero.
1359 if (ds
->ds_reserved
) {
1361 ASSERT(DS_UNIQUE_IS_ACCURATE(ds
));
1362 delta
= MIN(dsl_dataset_phys(ds
)->ds_unique_bytes
,
1364 dsl_dir_diduse_space(ds
->ds_dir
, DD_USED_REFRSRV
,
1368 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
1369 dsl_dataset_phys(ds
)->ds_deadlist_obj
=
1370 dsl_deadlist_clone(&ds
->ds_deadlist
, UINT64_MAX
,
1371 dsl_dataset_phys(ds
)->ds_prev_snap_obj
, tx
);
1372 dsl_deadlist_close(&ds
->ds_deadlist
);
1373 dsl_deadlist_open(&ds
->ds_deadlist
, mos
,
1374 dsl_dataset_phys(ds
)->ds_deadlist_obj
);
1375 dsl_deadlist_add_key(&ds
->ds_deadlist
,
1376 dsl_dataset_phys(ds
)->ds_prev_snap_txg
, tx
);
1378 ASSERT3U(dsl_dataset_phys(ds
)->ds_prev_snap_txg
, <, tx
->tx_txg
);
1379 dsl_dataset_phys(ds
)->ds_prev_snap_obj
= dsobj
;
1380 dsl_dataset_phys(ds
)->ds_prev_snap_txg
= crtxg
;
1381 dsl_dataset_phys(ds
)->ds_unique_bytes
= 0;
1382 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_UNIQUE_ACCURATE
)
1383 dsl_dataset_phys(ds
)->ds_flags
|= DS_FLAG_UNIQUE_ACCURATE
;
1385 VERIFY0(zap_add(mos
, dsl_dataset_phys(ds
)->ds_snapnames_zapobj
,
1386 snapname
, 8, 1, &dsobj
, tx
));
1389 dsl_dataset_rele(ds
->ds_prev
, ds
);
1390 VERIFY0(dsl_dataset_hold_obj(dp
,
1391 dsl_dataset_phys(ds
)->ds_prev_snap_obj
, ds
, &ds
->ds_prev
));
1393 dsl_scan_ds_snapshotted(ds
, tx
);
1395 dsl_dir_snap_cmtime_update(ds
->ds_dir
);
1397 spa_history_log_internal_ds(ds
->ds_prev
, "snapshot", tx
, "");
1401 dsl_dataset_snapshot_sync(void *arg
, dmu_tx_t
*tx
)
1403 dsl_dataset_snapshot_arg_t
*ddsa
= arg
;
1404 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1407 for (pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, NULL
);
1408 pair
!= NULL
; pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, pair
)) {
1411 char dsname
[MAXNAMELEN
];
1413 name
= nvpair_name(pair
);
1414 atp
= strchr(name
, '@');
1415 (void) strlcpy(dsname
, name
, atp
- name
+ 1);
1416 VERIFY0(dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
));
1418 dsl_dataset_snapshot_sync_impl(ds
, atp
+ 1, tx
);
1419 if (ddsa
->ddsa_props
!= NULL
) {
1420 dsl_props_set_sync_impl(ds
->ds_prev
,
1421 ZPROP_SRC_LOCAL
, ddsa
->ddsa_props
, tx
);
1423 dsl_dataset_rele(ds
, FTAG
);
1428 * The snapshots must all be in the same pool.
1429 * All-or-nothing: if there are any failures, nothing will be modified.
1432 dsl_dataset_snapshot(nvlist_t
*snaps
, nvlist_t
*props
, nvlist_t
*errors
)
1434 dsl_dataset_snapshot_arg_t ddsa
;
1436 boolean_t needsuspend
;
1440 nvlist_t
*suspended
= NULL
;
1442 pair
= nvlist_next_nvpair(snaps
, NULL
);
1445 firstname
= nvpair_name(pair
);
1447 error
= spa_open(firstname
, &spa
, FTAG
);
1450 needsuspend
= (spa_version(spa
) < SPA_VERSION_FAST_SNAP
);
1451 spa_close(spa
, FTAG
);
1454 suspended
= fnvlist_alloc();
1455 for (pair
= nvlist_next_nvpair(snaps
, NULL
); pair
!= NULL
;
1456 pair
= nvlist_next_nvpair(snaps
, pair
)) {
1457 char fsname
[MAXNAMELEN
];
1458 char *snapname
= nvpair_name(pair
);
1462 atp
= strchr(snapname
, '@');
1464 error
= SET_ERROR(EINVAL
);
1467 (void) strlcpy(fsname
, snapname
, atp
- snapname
+ 1);
1469 error
= zil_suspend(fsname
, &cookie
);
1472 fnvlist_add_uint64(suspended
, fsname
,
1477 ddsa
.ddsa_snaps
= snaps
;
1478 ddsa
.ddsa_props
= props
;
1479 ddsa
.ddsa_errors
= errors
;
1480 ddsa
.ddsa_cr
= CRED();
1483 error
= dsl_sync_task(firstname
, dsl_dataset_snapshot_check
,
1484 dsl_dataset_snapshot_sync
, &ddsa
,
1485 fnvlist_num_pairs(snaps
) * 3, ZFS_SPACE_CHECK_NORMAL
);
1488 if (suspended
!= NULL
) {
1489 for (pair
= nvlist_next_nvpair(suspended
, NULL
); pair
!= NULL
;
1490 pair
= nvlist_next_nvpair(suspended
, pair
)) {
1491 zil_resume((void *)(uintptr_t)
1492 fnvpair_value_uint64(pair
));
1494 fnvlist_free(suspended
);
1499 for (pair
= nvlist_next_nvpair(snaps
, NULL
); pair
!= NULL
;
1500 pair
= nvlist_next_nvpair(snaps
, pair
)) {
1501 char *snapname
= nvpair_name(pair
);
1502 zvol_create_minors(snapname
);
1510 typedef struct dsl_dataset_snapshot_tmp_arg
{
1511 const char *ddsta_fsname
;
1512 const char *ddsta_snapname
;
1513 minor_t ddsta_cleanup_minor
;
1514 const char *ddsta_htag
;
1515 } dsl_dataset_snapshot_tmp_arg_t
;
1518 dsl_dataset_snapshot_tmp_check(void *arg
, dmu_tx_t
*tx
)
1520 dsl_dataset_snapshot_tmp_arg_t
*ddsta
= arg
;
1521 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1525 error
= dsl_dataset_hold(dp
, ddsta
->ddsta_fsname
, FTAG
, &ds
);
1529 /* NULL cred means no limit check for tmp snapshot */
1530 error
= dsl_dataset_snapshot_check_impl(ds
, ddsta
->ddsta_snapname
,
1531 tx
, B_FALSE
, 0, NULL
);
1533 dsl_dataset_rele(ds
, FTAG
);
1537 if (spa_version(dp
->dp_spa
) < SPA_VERSION_USERREFS
) {
1538 dsl_dataset_rele(ds
, FTAG
);
1539 return (SET_ERROR(ENOTSUP
));
1541 error
= dsl_dataset_user_hold_check_one(NULL
, ddsta
->ddsta_htag
,
1544 dsl_dataset_rele(ds
, FTAG
);
1548 dsl_dataset_rele(ds
, FTAG
);
1553 dsl_dataset_snapshot_tmp_sync(void *arg
, dmu_tx_t
*tx
)
1555 dsl_dataset_snapshot_tmp_arg_t
*ddsta
= arg
;
1556 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1559 VERIFY0(dsl_dataset_hold(dp
, ddsta
->ddsta_fsname
, FTAG
, &ds
));
1561 dsl_dataset_snapshot_sync_impl(ds
, ddsta
->ddsta_snapname
, tx
);
1562 dsl_dataset_user_hold_sync_one(ds
->ds_prev
, ddsta
->ddsta_htag
,
1563 ddsta
->ddsta_cleanup_minor
, gethrestime_sec(), tx
);
1564 dsl_destroy_snapshot_sync_impl(ds
->ds_prev
, B_TRUE
, tx
);
1566 dsl_dataset_rele(ds
, FTAG
);
1570 dsl_dataset_snapshot_tmp(const char *fsname
, const char *snapname
,
1571 minor_t cleanup_minor
, const char *htag
)
1573 dsl_dataset_snapshot_tmp_arg_t ddsta
;
1576 boolean_t needsuspend
;
1579 ddsta
.ddsta_fsname
= fsname
;
1580 ddsta
.ddsta_snapname
= snapname
;
1581 ddsta
.ddsta_cleanup_minor
= cleanup_minor
;
1582 ddsta
.ddsta_htag
= htag
;
1584 error
= spa_open(fsname
, &spa
, FTAG
);
1587 needsuspend
= (spa_version(spa
) < SPA_VERSION_FAST_SNAP
);
1588 spa_close(spa
, FTAG
);
1591 error
= zil_suspend(fsname
, &cookie
);
1596 error
= dsl_sync_task(fsname
, dsl_dataset_snapshot_tmp_check
,
1597 dsl_dataset_snapshot_tmp_sync
, &ddsta
, 3, ZFS_SPACE_CHECK_RESERVED
);
1606 dsl_dataset_sync(dsl_dataset_t
*ds
, zio_t
*zio
, dmu_tx_t
*tx
)
1610 ASSERT(dmu_tx_is_syncing(tx
));
1611 ASSERT(ds
->ds_objset
!= NULL
);
1612 ASSERT(dsl_dataset_phys(ds
)->ds_next_snap_obj
== 0);
1615 * in case we had to change ds_fsid_guid when we opened it,
1618 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
1619 dsl_dataset_phys(ds
)->ds_fsid_guid
= ds
->ds_fsid_guid
;
1621 dmu_objset_sync(ds
->ds_objset
, zio
, tx
);
1623 for (f
= 0; f
< SPA_FEATURES
; f
++) {
1624 if (ds
->ds_feature_activation_needed
[f
]) {
1625 if (ds
->ds_feature_inuse
[f
])
1627 dsl_dataset_activate_feature(ds
->ds_object
, f
, tx
);
1628 ds
->ds_feature_inuse
[f
] = B_TRUE
;
1634 get_clones_stat(dsl_dataset_t
*ds
, nvlist_t
*nv
)
1637 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
1640 nvlist_t
*propval
= fnvlist_alloc();
1641 nvlist_t
*val
= fnvlist_alloc();
1643 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1646 * There may be missing entries in ds_next_clones_obj
1647 * due to a bug in a previous version of the code.
1648 * Only trust it if it has the right number of entries.
1650 if (dsl_dataset_phys(ds
)->ds_next_clones_obj
!= 0) {
1651 VERIFY0(zap_count(mos
, dsl_dataset_phys(ds
)->ds_next_clones_obj
,
1654 if (count
!= dsl_dataset_phys(ds
)->ds_num_children
- 1)
1656 for (zap_cursor_init(&zc
, mos
,
1657 dsl_dataset_phys(ds
)->ds_next_clones_obj
);
1658 zap_cursor_retrieve(&zc
, &za
) == 0;
1659 zap_cursor_advance(&zc
)) {
1660 dsl_dataset_t
*clone
;
1661 char buf
[ZFS_MAXNAMELEN
];
1662 VERIFY0(dsl_dataset_hold_obj(ds
->ds_dir
->dd_pool
,
1663 za
.za_first_integer
, FTAG
, &clone
));
1664 dsl_dir_name(clone
->ds_dir
, buf
);
1665 fnvlist_add_boolean(val
, buf
);
1666 dsl_dataset_rele(clone
, FTAG
);
1668 zap_cursor_fini(&zc
);
1669 fnvlist_add_nvlist(propval
, ZPROP_VALUE
, val
);
1670 fnvlist_add_nvlist(nv
, zfs_prop_to_name(ZFS_PROP_CLONES
), propval
);
1673 nvlist_free(propval
);
1677 dsl_dataset_stats(dsl_dataset_t
*ds
, nvlist_t
*nv
)
1679 uint64_t refd
, avail
, uobjs
, aobjs
, ratio
;
1680 ASSERTV(dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
);
1682 ASSERT(dsl_pool_config_held(dp
));
1684 ratio
= dsl_dataset_phys(ds
)->ds_compressed_bytes
== 0 ? 100 :
1685 (dsl_dataset_phys(ds
)->ds_uncompressed_bytes
* 100 /
1686 dsl_dataset_phys(ds
)->ds_compressed_bytes
);
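	/*
	 * Illustrative note (not from the original source): the ratio above
	 * is a percentage, e.g. 10G uncompressed over 4G compressed yields
	 * 250, which the userland tools display as a 2.50x
	 * refratio/compressratio.
	 */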
1688 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFRATIO
, ratio
);
1689 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_LOGICALREFERENCED
,
1690 dsl_dataset_phys(ds
)->ds_uncompressed_bytes
);
1692 if (ds
->ds_is_snapshot
) {
1693 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_COMPRESSRATIO
, ratio
);
1694 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_USED
,
1695 dsl_dataset_phys(ds
)->ds_unique_bytes
);
1696 get_clones_stat(ds
, nv
);
1698 dsl_dir_stats(ds
->ds_dir
, nv
);
1701 dsl_dataset_space(ds
, &refd
, &avail
, &uobjs
, &aobjs
);
1702 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_AVAILABLE
, avail
);
1703 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFERENCED
, refd
);
1705 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_CREATION
,
1706 dsl_dataset_phys(ds
)->ds_creation_time
);
1707 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_CREATETXG
,
1708 dsl_dataset_phys(ds
)->ds_creation_txg
);
1709 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFQUOTA
,
1711 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFRESERVATION
,
1713 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_GUID
,
1714 dsl_dataset_phys(ds
)->ds_guid
);
1715 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_UNIQUE
,
1716 dsl_dataset_phys(ds
)->ds_unique_bytes
);
1717 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_OBJSETID
,
1719 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_USERREFS
,
1721 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_DEFER_DESTROY
,
1722 DS_IS_DEFER_DESTROY(ds
) ? 1 : 0);
1724 if (dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0) {
1725 uint64_t written
, comp
, uncomp
;
1726 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1727 dsl_dataset_t
*prev
;
1730 err
= dsl_dataset_hold_obj(dp
,
1731 dsl_dataset_phys(ds
)->ds_prev_snap_obj
, FTAG
, &prev
);
1733 err
= dsl_dataset_space_written(prev
, ds
, &written
,
1735 dsl_dataset_rele(prev
, FTAG
);
1737 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_WRITTEN
,
1746 dsl_dataset_fast_stat(dsl_dataset_t
*ds
, dmu_objset_stats_t
*stat
)
1748 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1749 ASSERT(dsl_pool_config_held(dp
));
1751 stat
->dds_creation_txg
= dsl_dataset_phys(ds
)->ds_creation_txg
;
1752 stat
->dds_inconsistent
=
1753 dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_INCONSISTENT
;
1754 stat
->dds_guid
= dsl_dataset_phys(ds
)->ds_guid
;
1755 stat
->dds_origin
[0] = '\0';
1756 if (ds
->ds_is_snapshot
) {
1757 stat
->dds_is_snapshot
= B_TRUE
;
1758 stat
->dds_num_clones
=
1759 dsl_dataset_phys(ds
)->ds_num_children
- 1;
1761 stat
->dds_is_snapshot
= B_FALSE
;
1762 stat
->dds_num_clones
= 0;
1764 if (dsl_dir_is_clone(ds
->ds_dir
)) {
1767 VERIFY0(dsl_dataset_hold_obj(dp
,
1768 dsl_dir_phys(ds
->ds_dir
)->dd_origin_obj
,
1770 dsl_dataset_name(ods
, stat
->dds_origin
);
1771 dsl_dataset_rele(ods
, FTAG
);
1777 dsl_dataset_fsid_guid(dsl_dataset_t
*ds
)
1779 return (ds
->ds_fsid_guid
);
1783 dsl_dataset_space(dsl_dataset_t
*ds
,
1784 uint64_t *refdbytesp
, uint64_t *availbytesp
,
1785 uint64_t *usedobjsp
, uint64_t *availobjsp
)
1787 *refdbytesp
= dsl_dataset_phys(ds
)->ds_referenced_bytes
;
1788 *availbytesp
= dsl_dir_space_available(ds
->ds_dir
, NULL
, 0, TRUE
);
1789 if (ds
->ds_reserved
> dsl_dataset_phys(ds
)->ds_unique_bytes
)
1791 ds
->ds_reserved
- dsl_dataset_phys(ds
)->ds_unique_bytes
;
1792 if (ds
->ds_quota
!= 0) {
1794 * Adjust available bytes according to refquota
1796 if (*refdbytesp
< ds
->ds_quota
)
1797 *availbytesp
= MIN(*availbytesp
,
1798 ds
->ds_quota
- *refdbytesp
);
1802 *usedobjsp
= BP_GET_FILL(&dsl_dataset_phys(ds
)->ds_bp
);
1803 *availobjsp
= DN_MAX_OBJECT
- *usedobjsp
;
1807 dsl_dataset_modified_since_snap(dsl_dataset_t
*ds
, dsl_dataset_t
*snap
)
1809 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1812 if (dsl_dataset_phys(ds
)->ds_bp
.blk_birth
>
1813 dsl_dataset_phys(snap
)->ds_creation_txg
) {
1814 objset_t
*os
, *os_snap
;
1816 * It may be that only the ZIL differs, because it was
1817 * reset in the head. Don't count that as being
1820 if (dmu_objset_from_ds(ds
, &os
) != 0)
1822 if (dmu_objset_from_ds(snap
, &os_snap
) != 0)
1824 return (bcmp(&os
->os_phys
->os_meta_dnode
,
1825 &os_snap
->os_phys
->os_meta_dnode
,
1826 sizeof (os
->os_phys
->os_meta_dnode
)) != 0);
1831 typedef struct dsl_dataset_rename_snapshot_arg
{
1832 const char *ddrsa_fsname
;
1833 const char *ddrsa_oldsnapname
;
1834 const char *ddrsa_newsnapname
;
1835 boolean_t ddrsa_recursive
;
1837 } dsl_dataset_rename_snapshot_arg_t
;
1841 dsl_dataset_rename_snapshot_check_impl(dsl_pool_t
*dp
,
1842 dsl_dataset_t
*hds
, void *arg
)
1844 dsl_dataset_rename_snapshot_arg_t
*ddrsa
= arg
;
1848 error
= dsl_dataset_snap_lookup(hds
, ddrsa
->ddrsa_oldsnapname
, &val
);
1850 /* ignore nonexistent snapshots */
1851 return (error
== ENOENT
? 0 : error
);
1854 /* new name should not exist */
1855 error
= dsl_dataset_snap_lookup(hds
, ddrsa
->ddrsa_newsnapname
, &val
);
1857 error
= SET_ERROR(EEXIST
);
1858 else if (error
== ENOENT
)
1861 /* dataset name + 1 for the "@" + the new snapshot name must fit */
1862 if (dsl_dir_namelen(hds
->ds_dir
) + 1 +
1863 strlen(ddrsa
->ddrsa_newsnapname
) >= MAXNAMELEN
)
1864 error
= SET_ERROR(ENAMETOOLONG
);
1870 dsl_dataset_rename_snapshot_check(void *arg
, dmu_tx_t
*tx
)
1872 dsl_dataset_rename_snapshot_arg_t
*ddrsa
= arg
;
1873 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1877 error
= dsl_dataset_hold(dp
, ddrsa
->ddrsa_fsname
, FTAG
, &hds
);
1881 if (ddrsa
->ddrsa_recursive
) {
1882 error
= dmu_objset_find_dp(dp
, hds
->ds_dir
->dd_object
,
1883 dsl_dataset_rename_snapshot_check_impl
, ddrsa
,
1886 error
= dsl_dataset_rename_snapshot_check_impl(dp
, hds
, ddrsa
);
1888 dsl_dataset_rele(hds
, FTAG
);
1893 dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t
*dp
,
1894 dsl_dataset_t
*hds
, void *arg
)
1896 dsl_dataset_rename_snapshot_arg_t
*ddrsa
= arg
;
1899 dmu_tx_t
*tx
= ddrsa
->ddrsa_tx
;
1902 error
= dsl_dataset_snap_lookup(hds
, ddrsa
->ddrsa_oldsnapname
, &val
);
1903 ASSERT(error
== 0 || error
== ENOENT
);
1904 if (error
== ENOENT
) {
1905 /* ignore nonexistent snapshots */
1909 VERIFY0(dsl_dataset_hold_obj(dp
, val
, FTAG
, &ds
));
1911 /* log before we change the name */
1912 spa_history_log_internal_ds(ds
, "rename", tx
,
1913 "-> @%s", ddrsa
->ddrsa_newsnapname
);
1915 VERIFY0(dsl_dataset_snap_remove(hds
, ddrsa
->ddrsa_oldsnapname
, tx
,
1917 mutex_enter(&ds
->ds_lock
);
1918 (void) strcpy(ds
->ds_snapname
, ddrsa
->ddrsa_newsnapname
);
1919 mutex_exit(&ds
->ds_lock
);
1920 VERIFY0(zap_add(dp
->dp_meta_objset
,
1921 dsl_dataset_phys(hds
)->ds_snapnames_zapobj
,
1922 ds
->ds_snapname
, 8, 1, &ds
->ds_object
, tx
));
1924 dsl_dataset_rele(ds
, FTAG
);
1929 dsl_dataset_rename_snapshot_sync(void *arg
, dmu_tx_t
*tx
)
1931 dsl_dataset_rename_snapshot_arg_t
*ddrsa
= arg
;
1932 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1935 VERIFY0(dsl_dataset_hold(dp
, ddrsa
->ddrsa_fsname
, FTAG
, &hds
));
1936 ddrsa
->ddrsa_tx
= tx
;
1937 if (ddrsa
->ddrsa_recursive
) {
1938 VERIFY0(dmu_objset_find_dp(dp
, hds
->ds_dir
->dd_object
,
1939 dsl_dataset_rename_snapshot_sync_impl
, ddrsa
,
1942 VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp
, hds
, ddrsa
));
1944 dsl_dataset_rele(hds
, FTAG
);
1948 dsl_dataset_rename_snapshot(const char *fsname
,
1949 const char *oldsnapname
, const char *newsnapname
, boolean_t recursive
)
1952 char *oldname
, *newname
;
1956 dsl_dataset_rename_snapshot_arg_t ddrsa
;
1958 ddrsa
.ddrsa_fsname
= fsname
;
1959 ddrsa
.ddrsa_oldsnapname
= oldsnapname
;
1960 ddrsa
.ddrsa_newsnapname
= newsnapname
;
1961 ddrsa
.ddrsa_recursive
= recursive
;
1963 error
= dsl_sync_task(fsname
, dsl_dataset_rename_snapshot_check
,
1964 dsl_dataset_rename_snapshot_sync
, &ddrsa
,
1965 1, ZFS_SPACE_CHECK_RESERVED
);
1968 return (SET_ERROR(error
));
1971 oldname
= kmem_asprintf("%s@%s", fsname
, oldsnapname
);
1972 newname
= kmem_asprintf("%s@%s", fsname
, newsnapname
);
1973 zvol_rename_minors(oldname
, newname
);
1982 * If we're doing an ownership handoff, we need to make sure that there is
1983 * only one long hold on the dataset. We're not allowed to change anything here
1984 * so we don't permanently release the long hold or regular hold here. We want
1985 * to do this only when syncing to avoid the dataset unexpectedly going away
1986 * when we release the long hold.
1989 dsl_dataset_handoff_check(dsl_dataset_t
*ds
, void *owner
, dmu_tx_t
*tx
)
1993 if (!dmu_tx_is_syncing(tx
))
1996 if (owner
!= NULL
) {
1997 VERIFY3P(ds
->ds_owner
, ==, owner
);
1998 dsl_dataset_long_rele(ds
, owner
);
2001 held
= dsl_dataset_long_held(ds
);
2004 dsl_dataset_long_hold(ds
, owner
);
2007 return (SET_ERROR(EBUSY
));
2012 typedef struct dsl_dataset_rollback_arg
{
2013 const char *ddra_fsname
;
2015 nvlist_t
*ddra_result
;
2016 } dsl_dataset_rollback_arg_t
;
2019 dsl_dataset_rollback_check(void *arg
, dmu_tx_t
*tx
)
2021 dsl_dataset_rollback_arg_t
*ddra
= arg
;
2022 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
2024 int64_t unused_refres_delta
;
2027 nvlist_t
*proprequest
, *bookmarks
;
2029 error
= dsl_dataset_hold(dp
, ddra
->ddra_fsname
, FTAG
, &ds
);
2033 /* must not be a snapshot */
2034 if (ds
->ds_is_snapshot
) {
2035 dsl_dataset_rele(ds
, FTAG
);
2036 return (SET_ERROR(EINVAL
));
2039 /* must have a most recent snapshot */
2040 if (dsl_dataset_phys(ds
)->ds_prev_snap_txg
< TXG_INITIAL
) {
2041 dsl_dataset_rele(ds
, FTAG
);
2042 return (SET_ERROR(EINVAL
));
2045 /* must not have any bookmarks after the most recent snapshot */
2046 proprequest
= fnvlist_alloc();
2047 fnvlist_add_boolean(proprequest
, zfs_prop_to_name(ZFS_PROP_CREATETXG
));
2048 bookmarks
= fnvlist_alloc();
2049 error
= dsl_get_bookmarks_impl(ds
, proprequest
, bookmarks
);
2050 fnvlist_free(proprequest
);
2053 for (pair
= nvlist_next_nvpair(bookmarks
, NULL
);
2054 pair
!= NULL
; pair
= nvlist_next_nvpair(bookmarks
, pair
)) {
2056 fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair
),
2057 zfs_prop_to_name(ZFS_PROP_CREATETXG
));
2058 uint64_t createtxg
= fnvlist_lookup_uint64(valuenv
, "value");
2059 if (createtxg
> dsl_dataset_phys(ds
)->ds_prev_snap_txg
) {
2060 fnvlist_free(bookmarks
);
2061 dsl_dataset_rele(ds
, FTAG
);
2062 return (SET_ERROR(EEXIST
));
2065 fnvlist_free(bookmarks
);
2067 error
= dsl_dataset_handoff_check(ds
, ddra
->ddra_owner
, tx
);
2069 dsl_dataset_rele(ds
, FTAG
);
2074 * Check if the snap we are rolling back to uses more than
2077 if (ds
->ds_quota
!= 0 &&
2078 dsl_dataset_phys(ds
->ds_prev
)->ds_referenced_bytes
> ds
->ds_quota
) {
2079 dsl_dataset_rele(ds
, FTAG
);
2080 return (SET_ERROR(EDQUOT
));
2084 * When we do the clone swap, we will temporarily use more space
2085 * due to the refreservation (the head will no longer have any
2086 * unique space, so the entire amount of the refreservation will need
2087 * to be free). We will immediately destroy the clone, freeing
2088 * this space, but the freeing happens over many txg's.
2090 unused_refres_delta
= (int64_t)MIN(ds
->ds_reserved
,
2091 dsl_dataset_phys(ds
)->ds_unique_bytes
);
2093 if (unused_refres_delta
> 0 &&
2094 unused_refres_delta
>
2095 dsl_dir_space_available(ds
->ds_dir
, NULL
, 0, TRUE
)) {
2096 dsl_dataset_rele(ds
, FTAG
);
2097 return (SET_ERROR(ENOSPC
));
2100 dsl_dataset_rele(ds
, FTAG
);
2105 dsl_dataset_rollback_sync(void *arg
, dmu_tx_t
*tx
)
2107 dsl_dataset_rollback_arg_t
*ddra
= arg
;
2108 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
2109 dsl_dataset_t
*ds
, *clone
;
2111 char namebuf
[ZFS_MAXNAMELEN
];
2113 VERIFY0(dsl_dataset_hold(dp
, ddra
->ddra_fsname
, FTAG
, &ds
));
2115 dsl_dataset_name(ds
->ds_prev
, namebuf
);
2116 fnvlist_add_string(ddra
->ddra_result
, "target", namebuf
);
2118 cloneobj
= dsl_dataset_create_sync(ds
->ds_dir
, "%rollback",
2119 ds
->ds_prev
, DS_CREATE_FLAG_NODIRTY
, kcred
, tx
);
2121 VERIFY0(dsl_dataset_hold_obj(dp
, cloneobj
, FTAG
, &clone
));
2123 dsl_dataset_clone_swap_sync_impl(clone
, ds
, tx
);
2124 dsl_dataset_zero_zil(ds
, tx
);
2126 dsl_destroy_head_sync_impl(clone
, tx
);
2128 dsl_dataset_rele(clone
, FTAG
);
2129 dsl_dataset_rele(ds
, FTAG
);
/*
 * Rolls back the given filesystem or volume to the most recent snapshot.
 * The name of the most recent snapshot will be returned under key "target"
 * in the result nvlist.
 *
 * If owner != NULL:
 * - The existing dataset MUST be owned by the specified owner at entry
 * - Upon return, dataset will still be held by the same owner, whether we
 *   succeed or not.
 *
 * This mode is required any time the existing filesystem is mounted.  See
 * notes above zfs_suspend_fs() for further details.
 */
2146 dsl_dataset_rollback(const char *fsname
, void *owner
, nvlist_t
*result
)
2148 dsl_dataset_rollback_arg_t ddra
;
2150 ddra
.ddra_fsname
= fsname
;
2151 ddra
.ddra_owner
= owner
;
2152 ddra
.ddra_result
= result
;
2154 return (dsl_sync_task(fsname
, dsl_dataset_rollback_check
,
2155 dsl_dataset_rollback_sync
, &ddra
,
2156 1, ZFS_SPACE_CHECK_RESERVED
));
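/*
 * Illustrative sketch (an addition, not part of the original source): how a
 * caller might invoke dsl_dataset_rollback() and read back the name of the
 * snapshot that was rolled back to.  The dataset name "pool/fs" is a made-up
 * example and error handling is abbreviated; a mounted filesystem would pass
 * its owner (see zfs_suspend_fs()) rather than NULL.
 *
 *	nvlist_t *result = fnvlist_alloc();
 *	int err = dsl_dataset_rollback("pool/fs", NULL, result);
 *	if (err == 0) {
 *		char *target = fnvlist_lookup_string(result, "target");
 *		zfs_dbgmsg("rolled back to %s", target);
 *	}
 *	fnvlist_free(result);
 */
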
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

typedef struct dsl_dataset_promote_arg {
	const char *ddpa_clonename;
	dsl_dataset_t *ddpa_clone;
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin; /* origin of the origin */
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;
	cred_t *cr;
} dsl_dataset_promote_arg_t;

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
    void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);

static int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	dsl_dataset_t *origin_ds;
	int err;
	uint64_t unused;
	uint64_t ss_mv_cnt;
	size_t max_snap_len;

	err = promote_hold(ddpa, dp, FTAG);
	if (err != 0)
		return (err);

	hds = ddpa->ddpa_clone;
	max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;

	if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
		promote_rele(ddpa, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/*
	 * Compute and check the amount of space to transfer.  Since this is
	 * so expensive, don't do the preliminary check.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		promote_rele(ddpa, FTAG);
		return (0);
	}

	snap = list_head(&ddpa->shared_snaps);
	origin_ds = snap->ds;

	/* compute origin's new unique space */
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
	    &ddpa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used by each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	ss_mv_cnt = 0;
	ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
	ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
	ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		ss_mv_cnt++;

		/*
		 * If there are long holds, we won't be able to evict
		 * the objset.
		 */
		if (dsl_dataset_long_held(ds)) {
			err = SET_ERROR(EBUSY);
			goto out;
		}

		/* Check that the snapshot name does not conflict */
		VERIFY0(dsl_dataset_get_snapname(ds));
		if (strlen(ds->ds_snapname) >= max_snap_len) {
			err = SET_ERROR(ENAMETOOLONG);
			goto out;
		}
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			(void) strcpy(ddpa->err_ds, snap->ds->ds_snapname);
			err = SET_ERROR(EEXIST);
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		ddpa->used += dlused;
		ddpa->comp += dlcomp;
		ddpa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (ddpa->origin_origin) {
		ddpa->used -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
		ddpa->comp -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
		ddpa->uncomp -=
		    dsl_dataset_phys(ddpa->origin_origin)->
		    ds_uncompressed_bytes;
	}

	/* Check that there is enough space and limit headroom here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    0, ss_mv_cnt, ddpa->used, ddpa->cr);
	if (err != 0)
		goto out;

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&ddpa->origin_snaps);
		err = snaplist_space(&ddpa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
		if (err != 0)
			goto out;

		err = snaplist_space(&ddpa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err != 0)
			goto out;
		ddpa->cloneusedsnap += space;
	}
	if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
	    DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&ddpa->origin_snaps,
		    dsl_dataset_phys(origin_ds)->ds_creation_txg,
		    &ddpa->originusedsnap);
		if (err != 0)
			goto out;
	}

out:
	promote_rele(ddpa, FTAG);
	return (err);
}

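/*
 * Worked example (illustrative, not from the original source) of the space
 * computation in dsl_dataset_promote_check() above.  Suppose the shared
 * snapshots have used sizes u0 = 1G, u1 = 2G, u2 = 3G and deadlist (killed)
 * sizes k0 = 0, k1 = 100M, k2 = 200M.  Summing the per-snapshot births
 * (uN - u(N-1) + kN) telescopes to u2 + k2 + k1 + k0 = 3G + 300M, which is
 * exactly what the loop accumulates into ddpa->used: it starts from the
 * origin snapshot's ds_referenced_bytes (u2) and adds each deadlist's space.
 * For a clone of a clone the walk stops before $ORIGIN, so the clone
 * origin's ds_referenced_bytes is subtracted back out, matching the
 * "- uM-1" term in the comment.
 */
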
static void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	dsl_dataset_t *origin_ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	VERIFY0(promote_hold(ddpa, dp, FTAG));
	hds = ddpa->ddpa_clone;

	ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);

	snap = list_head(&ddpa->shared_snaps);
	origin_ds = snap->ds;
	dd = hds->ds_dir;

	snap = list_head(&ddpa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
		dsl_dataset_remove_from_next_clones(origin_ds,
		    snap->ds->ds_object, tx);
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
	dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    dsl_dataset_phys(origin_ds)->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY0(zap_remove_int(dp->dp_meta_objset,
		    dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
		    hds->ds_object, tx));

		VERIFY0(zap_remove_int(dp->dp_meta_objset,
		    dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
		    origin_head->ds_object, tx));
		if (dsl_dir_phys(dd)->dd_clones == 0) {
			dsl_dir_phys(dd)->dd_clones =
			    zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
			    DMU_OT_NONE, 0, tx);
		}
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
	}

	/* move snapshots to this dir */
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/*
		 * Property callbacks are registered to a particular
		 * dsl_dir.  Since ours is changing, evict the objset
		 * so that they will be unregistered from the old dsl_dir.
		 */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}

		/* move snap name entry */
		VERIFY0(dsl_dataset_get_snapname(ds));
		VERIFY0(dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx, B_TRUE));
		VERIFY0(zap_add(dp->dp_meta_objset,
		    dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));
		dsl_fs_ss_count_adjust(hds->ds_dir, 1,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
		dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_rele(ds->ds_dir, ds);
		VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY0(dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = dsl_dir_phys(cnds->ds_dir)->
				    dd_head_dataset_obj;

				VERIFY0(zap_remove_int(dp->dp_meta_objset,
				    dsl_dir_phys(odd)->dd_clones, o, tx));
				VERIFY0(zap_add_int(dp->dp_meta_objset,
				    dsl_dir_phys(dd)->dd_clones, o, tx));
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT(!dsl_prop_hascb(ds));
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */

	delta = ddpa->cloneusedsnap -
	    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(ddpa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);

	delta = ddpa->originusedsnap -
	    dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(ddpa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);

	dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;

	/* log history record */
	spa_history_log_internal_ds(hds, "promote", tx, "");

	dsl_dir_rele(odd, FTAG);
	promote_rele(ddpa, FTAG);
}

/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp,
    uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
{
	uint64_t obj = last_obj;

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
		ASSERT(err != ENOENT);
		if (err != 0)
			return (err);

		if (first_obj == 0)
			first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;

		snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}

	return (0);
}

static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}

static void
snaplist_destroy(list_t *l, void *tag)
{
	struct promotenode *snap;

	if (l == NULL || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		dsl_dataset_rele(snap->ds, tag);
		kmem_free(snap, sizeof (*snap));
	}
	list_destroy(l);
}

static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
{
	dsl_dir_t *dd;
	int error;
	struct promotenode *snap;

	error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
	    &ddpa->ddpa_clone);
	if (error != 0)
		return (error);
	dd = ddpa->ddpa_clone->ds_dir;

	if (ddpa->ddpa_clone->ds_is_snapshot ||
	    !dsl_dir_is_clone(dd)) {
		dsl_dataset_rele(ddpa->ddpa_clone, tag);
		return (SET_ERROR(EINVAL));
	}

	error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
	    &ddpa->shared_snaps, tag);
	if (error != 0)
		goto out;

	error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
	    &ddpa->clone_snaps, tag);
	if (error != 0)
		goto out;

	snap = list_head(&ddpa->shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
	error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
	    dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
	    &ddpa->origin_snaps, tag);
	if (error != 0)
		goto out;

	if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
		error = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
		    tag, &ddpa->origin_origin);
		if (error != 0)
			goto out;
	}
out:
	if (error != 0)
		promote_rele(ddpa, tag);
	return (error);
}

static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
{
	snaplist_destroy(&ddpa->shared_snaps, tag);
	snaplist_destroy(&ddpa->clone_snaps, tag);
	snaplist_destroy(&ddpa->origin_snaps, tag);
	if (ddpa->origin_origin != NULL)
		dsl_dataset_rele(ddpa->origin_origin, tag);
	dsl_dataset_rele(ddpa->ddpa_clone, tag);
}

/*
 * Promote a clone.
 *
 * If it fails due to a conflicting snapshot name, "conflsnap" will be filled
 * in with the name.  (It must be at least MAXNAMELEN bytes long.)
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_promote_arg_t ddpa = { 0 };
	uint64_t numsnaps;
	int error;
	objset_t *os;

	/*
	 * We will modify space proportional to the number of
	 * snapshots.  Compute numsnaps.
	 */
	error = dmu_objset_hold(name, FTAG, &os);
	if (error != 0)
		return (error);
	error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
	    dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
	    &numsnaps);
	dmu_objset_rele(os, FTAG);
	if (error != 0)
		return (error);

	ddpa.ddpa_clonename = name;
	ddpa.err_ds = conflsnap;
	ddpa.cr = CRED();

	return (dsl_sync_task(name, dsl_dataset_promote_check,
	    dsl_dataset_promote_sync, &ddpa,
	    2 + numsnaps, ZFS_SPACE_CHECK_RESERVED));
}

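/*
 * Illustrative sketch (an addition, not part of the original source):
 * promoting a clone by name and reporting a conflicting snapshot name, as
 * the ioctl path does.  The dataset name is a made-up example; conflsnap
 * must be at least MAXNAMELEN bytes long.
 *
 *	char conflsnap[MAXNAMELEN];
 *	int err = dsl_dataset_promote("pool/fs/clone", conflsnap);
 *	if (err == EEXIST)
 *		zfs_dbgmsg("snapshot name conflict: %s", conflsnap);
 */
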
int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
	int64_t unused_refres_delta;

	/* they should both be heads */
	if (clone->ds_is_snapshot ||
	    origin_head->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* if we are not forcing, the branch point should be just before them */
	if (!force && clone->ds_prev != origin_head->ds_prev)
		return (SET_ERROR(EINVAL));

	/* clone should be the clone (unless they are unrelated) */
	if (clone->ds_prev != NULL &&
	    clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
	    origin_head->ds_dir != clone->ds_prev->ds_dir)
		return (SET_ERROR(EINVAL));

	/* the clone should be a child of the origin */
	if (clone->ds_dir->dd_parent != origin_head->ds_dir)
		return (SET_ERROR(EINVAL));

	/* origin_head shouldn't be modified unless 'force' */
	if (!force &&
	    dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
		return (SET_ERROR(ETXTBSY));

	/* origin_head should have no long holds (e.g. is not mounted) */
	if (dsl_dataset_handoff_check(origin_head, owner, tx))
		return (SET_ERROR(EBUSY));

	/* check amount of any unconsumed refreservation */
	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
		return (SET_ERROR(ENOSPC));

	/* clone can't be over the head's refquota */
	if (origin_head->ds_quota != 0 &&
	    dsl_dataset_phys(clone)->ds_referenced_bytes >
	    origin_head->ds_quota)
		return (SET_ERROR(EDQUOT));

	return (0);
}

void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int64_t unused_refres_delta;
	spa_feature_t f;

	ASSERT(clone->ds_reserved == 0);
	ASSERT(origin_head->ds_quota == 0 ||
	    dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota);
	ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);

	/*
	 * Swap per-dataset feature flags.
	 */
	for (f = 0; f < SPA_FEATURES; f++) {
		boolean_t clone_inuse;
		boolean_t origin_head_inuse;

		if (!(spa_feature_table[f].fi_flags &
		    ZFEATURE_FLAG_PER_DATASET)) {
			ASSERT(!clone->ds_feature_inuse[f]);
			ASSERT(!origin_head->ds_feature_inuse[f]);
			continue;
		}

		clone_inuse = clone->ds_feature_inuse[f];
		origin_head_inuse = origin_head->ds_feature_inuse[f];

		if (clone_inuse) {
			dsl_dataset_deactivate_feature(clone->ds_object, f, tx);
			clone->ds_feature_inuse[f] = B_FALSE;
		}
		if (origin_head_inuse) {
			dsl_dataset_deactivate_feature(origin_head->ds_object,
			    f, tx);
			origin_head->ds_feature_inuse[f] = B_FALSE;
		}
		if (clone_inuse) {
			dsl_dataset_activate_feature(origin_head->ds_object,
			    f, tx);
			origin_head->ds_feature_inuse[f] = B_TRUE;
		}
		if (origin_head_inuse) {
			dsl_dataset_activate_feature(clone->ds_object, f, tx);
			clone->ds_feature_inuse[f] = B_TRUE;
		}
	}

	dmu_buf_will_dirty(clone->ds_dbuf, tx);
	dmu_buf_will_dirty(origin_head->ds_dbuf, tx);

	if (clone->ds_objset != NULL) {
		dmu_objset_evict(clone->ds_objset);
		clone->ds_objset = NULL;
	}

	if (origin_head->ds_objset != NULL) {
		dmu_objset_evict(origin_head->ds_objset);
		origin_head->ds_objset = NULL;
	}

	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (clone->ds_prev) {
		dsl_dataset_t *origin = clone->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
		    &dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = dsl_dataset_phys(origin_head)->ds_bp;
		dsl_dataset_phys(origin_head)->ds_bp =
		    dsl_dataset_phys(clone)->ds_bp;
		dsl_dataset_phys(clone)->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(dsl_dir_phys(clone->ds_dir)->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&clone->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&origin_head->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
		    cdl_used -
		    (dsl_dataset_phys(origin_head)->ds_referenced_bytes +
		    odl_used);
		dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
		    cdl_comp -
		    (dsl_dataset_phys(origin_head)->ds_compressed_bytes +
		    odl_comp);
		duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
		    odl_uncomp);

		dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&origin_head->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
	    dsl_dataset_phys(clone)->ds_referenced_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
	    dsl_dataset_phys(clone)->ds_compressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
	    dsl_dataset_phys(clone)->ds_uncompressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
	    unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&clone->ds_deadlist);
	dsl_deadlist_close(&origin_head->ds_deadlist);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(origin_head)->ds_deadlist_obj);

	dsl_scan_ds_clone_swapped(origin_head, clone, tx);

	spa_history_log_internal_ds(clone, "clone swap", tx,
	    "parent=%s", origin_head->ds_dir->dd_myname);
}

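/*
 * Worked example (illustrative, not from the original source) of the
 * unused_refres_delta computed in dsl_dataset_clone_swap_sync_impl() above.
 * With a refreservation of 10G, origin_head unique bytes of 2G and clone
 * unique bytes of 7G, the head's unconsumed refreservation shrinks from
 * 10G - 2G = 8G to 10G - 7G = 3G, so unused_refres_delta =
 * MIN(10G, 2G) - MIN(10G, 7G) = -5G and the DD_USED_REFRSRV charge on the
 * head's dsl_dir is reduced by 5G.
 */
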
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(pname, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
	if (error == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_pool_rele(dp, FTAG);

	return (error);
}

int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*used -=
		    (ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
	    ds->ds_quota) {
		if (inflight > 0 ||
		    dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
			error = SET_ERROR(ERESTART);
		else
			error = SET_ERROR(EDQUOT);
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}

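/*
 * Worked example (illustrative, not from the original source) of the
 * reserved-bytes adjustment in dsl_dataset_check_quota() above.  If
 * ds_reserved = 1G and ds_unique_bytes = 256M, then 768M of the
 * refreservation is still unconsumed: that amount is subtracted from *used,
 * and the part of the new write that merely consumes refreservation
 * (asize minus the parent_delta() that actually reaches the dsl_dir) is
 * reported in *ref_rsrv instead of being charged against the quota.
 */
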
typedef struct dsl_dataset_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;

static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
	    newval < ds->ds_reserved) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	dsl_prop_set_sync_impl(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
	    &ddsqra->ddsqra_value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));

	if (ds->ds_quota != newval) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = newval;
	}
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
    uint64_t refquota)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refquota;

	return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
	    dsl_dataset_set_refquota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}

static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval, unique;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, newval) -
		    MAX(unique, ds->ds_reserved);

		if (delta >
		    dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
		    (ds->ds_quota > 0 && newval > ds->ds_quota)) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ENOSPC));
		}
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
    zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
	uint64_t newval;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    source, sizeof (value), 1, &value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	delta = MAX(0, (int64_t)(newval - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = newval;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}

static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
	dsl_dataset_set_refreservation_sync_impl(ds,
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
    uint64_t refreservation)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refreservation;

	return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
	    dsl_dataset_set_refreservation_sync, &ddsqra,
	    0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Return (in *usedp) the amount of space written in new that is not
 * present in oldsnap.  New may be a snapshot or the head.  Old must be
 * a snapshot before new, in new's filesystem (or its origin).  If not then
 * fail and return EINVAL.
 *
 * The written space is calculated by considering two components:  First, we
 * ignore any freed space, and calculate the written as new's used space
 * minus old's used space.  Next, we add in the amount of space that was freed
 * between the two snapshots, thus reducing new's used space relative to old's.
 * Specifically, this is the space that was born before old->ds_creation_txg,
 * and freed before new (ie. on new's deadlist or a previous deadlist).
 *
 * space freed                         [---------------------]
 * snapshots                       ---O-------O--------O-------O------
 *                                         oldsnap            new
 */
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = new->ds_dir->dd_pool;

	ASSERT(dsl_pool_config_held(dp));

	*usedp = 0;
	*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
	*usedp -= dsl_dataset_phys(oldsnap)->ds_referenced_bytes;

	*compp = 0;
	*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
	*compp -= dsl_dataset_phys(oldsnap)->ds_compressed_bytes;

	*uncompp = 0;
	*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
	*uncompp -= dsl_dataset_phys(oldsnap)->ds_uncompressed_bytes;

	snapobj = new->ds_object;
	while (snapobj != oldsnap->ds_object) {
		dsl_dataset_t *snap;
		uint64_t used, comp, uncomp;

		if (snapobj == new->ds_object) {
			snap = new;
		} else {
			err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
			if (err != 0)
				break;
		}

		if (dsl_dataset_phys(snap)->ds_prev_snap_txg ==
		    dsl_dataset_phys(oldsnap)->ds_creation_txg) {
			/*
			 * The blocks in the deadlist can not be born after
			 * ds_prev_snap_txg, so get the whole deadlist space,
			 * which is more efficient (especially for old-format
			 * deadlists).  Unfortunately the deadlist code
			 * doesn't have enough information to make this
			 * optimization itself.
			 */
			dsl_deadlist_space(&snap->ds_deadlist,
			    &used, &comp, &uncomp);
		} else {
			dsl_deadlist_space_range(&snap->ds_deadlist,
			    0, dsl_dataset_phys(oldsnap)->ds_creation_txg,
			    &used, &comp, &uncomp);
		}
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		/*
		 * If we get to the beginning of the chain of snapshots
		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
		 * was not a snapshot of/before new.
		 */
		snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
		if (snap != new)
			dsl_dataset_rele(snap, FTAG);
		if (snapobj == 0) {
			err = SET_ERROR(EINVAL);
			break;
		}
	}
	return (err);
}

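/*
 * Worked example (illustrative, not from the original source) of the
 * written-space calculation in dsl_dataset_space_written() above.  If
 * oldsnap references 10G and new references 12G, the first component is
 * 12G - 10G = 2G.  If another 3G of blocks that existed at oldsnap (born
 * before its ds_creation_txg) were freed by the time of new, those 3G are
 * found on the deadlists walked above and added back, for a total of
 * 2G + 3G = 5G written between the two.
 */
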
/*
 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
 * lastsnap, and all snapshots in between are deleted.
 *
 * blocks that would be freed            [---------------------------]
 * snapshots                       ---O-------O--------O-------O--------O
 *                                        firstsnap        lastsnap
 *
 * This is the set of blocks that were born after the snap before firstsnap,
 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
 * We calculate this by iterating over the relevant deadlists (from the snap
 * after lastsnap, backward to the snap after firstsnap), summing up the
 * space on the deadlist that was born after the snap before firstsnap.
 */
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
    dsl_dataset_t *lastsnap,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;

	ASSERT(firstsnap->ds_is_snapshot);
	ASSERT(lastsnap->ds_is_snapshot);

	/*
	 * Check that the snapshots are in the same dsl_dir, and firstsnap
	 * is before lastsnap.
	 */
	if (firstsnap->ds_dir != lastsnap->ds_dir ||
	    dsl_dataset_phys(firstsnap)->ds_creation_txg >
	    dsl_dataset_phys(lastsnap)->ds_creation_txg)
		return (SET_ERROR(EINVAL));

	*usedp = *compp = *uncompp = 0;

	snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
	while (snapobj != firstsnap->ds_object) {
		dsl_dataset_t *ds;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
		if (err != 0)
			break;

		dsl_deadlist_space_range(&ds->ds_deadlist,
		    dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		ASSERT3U(snapobj, !=, 0);
		dsl_dataset_rele(ds, FTAG);
	}
	return (err);
}

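/*
 * Worked example (illustrative, not from the original source): with
 * snapshots s1..s4 and a range deletion of s2 through s3, firstsnap = s2
 * and lastsnap = s3.  The loop in dsl_dataset_space_wouldfree() starts at
 * s3's next snapshot (s4) and walks back through the deadlists of s4 and
 * s3, summing only the space born after s1 (firstsnap's prev_snap_txg);
 * that sum is what deleting s2 and s3 would actually reclaim.
 */
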
/*
 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
 * For example, they could both be snapshots of the same filesystem, and
 * 'earlier' is before 'later'.  Or 'earlier' could be the origin of
 * 'later's filesystem.  Or 'earlier' could be an older snapshot in the origin's
 * filesystem.  Or 'earlier' could be the origin's origin.
 *
 * If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
 */
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
    uint64_t earlier_txg)
{
	dsl_pool_t *dp = later->ds_dir->dd_pool;
	int error;
	boolean_t ret;
	dsl_dataset_t *origin;

	ASSERT(dsl_pool_config_held(dp));
	ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);

	if (earlier_txg == 0)
		earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;

	if (later->ds_is_snapshot &&
	    earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
		return (B_FALSE);

	if (later->ds_dir == earlier->ds_dir)
		return (B_TRUE);
	if (!dsl_dir_is_clone(later->ds_dir))
		return (B_FALSE);

	if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object)
		return (B_TRUE);
	error = dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
	if (error != 0)
		return (B_FALSE);
	ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
	dsl_dataset_rele(origin, FTAG);
	return (ret);
}

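/*
 * Illustrative example (an addition, not part of the original source) of
 * dsl_dataset_is_before(): given pool/fs@a and a clone pool/c created from
 * pool/fs@a with its own snapshot pool/c@b, then
 * dsl_dataset_is_before(pool/c@b, pool/fs@a, 0) returns B_TRUE because @a
 * is the clone's origin, while the reverse call returns B_FALSE.
 */
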
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
#if defined(_LP64)
module_param(zfs_max_recordsize, int, 0644);
MODULE_PARM_DESC(zfs_max_recordsize, "Max allowed record size");
#else
/* Limited to 1M on 32-bit platforms due to lack of virtual address space */
module_param(zfs_max_recordsize, int, 0444);
MODULE_PARM_DESC(zfs_max_recordsize, "Max allowed record size");
#endif

EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_set_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_block_freeable);
EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);