/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 RackTop Systems.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/policy.h>
#include <sys/dmu_send.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>

/*
 * The SPA supports block sizes up to 16MB.  However, very large blocks
 * can have an impact on i/o latency (e.g. tying up a spinning disk for
 * ~300ms), and also potentially on the memory allocator.  Therefore,
 * we do not allow the recordsize to be set larger than zfs_max_recordsize
 * (default 1MB).  Larger blocks can be created by changing this tunable,
 * and pools with larger blocks can always be imported and used, regardless
 * of this setting.
 */
int zfs_max_recordsize = 1 * 1024 * 1024;

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);

static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
    uint64_t obj, dmu_tx_t *tx);
static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
    dmu_tx_t *tx);

extern int spa_asize_inflation;

static zil_header_t zero_zil;

/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
        dsl_dataset_phys_t *ds_phys;
        uint64_t old_bytes, new_bytes;

        if (ds->ds_reserved == 0)
                return (delta);

        ds_phys = dsl_dataset_phys(ds);
        old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
        new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

        ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
        return (new_bytes - old_bytes);
}
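
/*
 * Worked example (illustrative note, not in the original source): with
 * ds_reserved = 10M and ds_unique_bytes = 8M, a delta of +4M gives
 * old_bytes = MAX(8M, 10M) = 10M and new_bytes = MAX(12M, 10M) = 12M,
 * so only 2M of the 4M delta is propagated to the dsl_dir layer; the
 * remainder was already covered by the refreservation.
 */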

void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
        int used, compressed, uncompressed;
        int64_t delta;

        used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
        compressed = BP_GET_PSIZE(bp);
        uncompressed = BP_GET_UCSIZE(bp);

        dprintf_bp(bp, "ds=%p", ds);

        ASSERT(dmu_tx_is_syncing(tx));
        /* It could have been compressed away to nothing */
        if (BP_IS_HOLE(bp))
                return;
        ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
        ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
        if (ds == NULL) {
                dsl_pool_mos_diduse_space(tx->tx_pool,
                    used, compressed, uncompressed);
                return;
        }

        ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        mutex_enter(&ds->ds_lock);
        delta = parent_delta(ds, used);
        dsl_dataset_phys(ds)->ds_referenced_bytes += used;
        dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
        dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
        dsl_dataset_phys(ds)->ds_unique_bytes += used;

        if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
                ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_BLOCKS] =
                    B_TRUE;
        }

        spa_feature_t f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
        if (f != SPA_FEATURE_NONE)
                ds->ds_feature_activation_needed[f] = B_TRUE;

        mutex_exit(&ds->ds_lock);
        dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
            compressed, uncompressed, tx);
        dsl_dir_transfer_space(ds->ds_dir, used - delta,
            DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
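
/*
 * Accounting example (illustrative note, not in the original source):
 * a 128K logical block that compresses to 32K and allocates 32K on
 * disk adds used = 32K to ds_referenced_bytes and ds_unique_bytes,
 * compressed = 32K (the physical size) to ds_compressed_bytes, and
 * uncompressed = 128K to ds_uncompressed_bytes.
 */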

/*
 * Called when the specified segment has been remapped, and is thus no
 * longer referenced in the head dataset.  The vdev must be indirect.
 *
 * If the segment is referenced by a snapshot, put it on the remap deadlist.
 * Otherwise, add this segment to the obsolete spacemap.
 */
void
dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
    uint64_t size, uint64_t birth, dmu_tx_t *tx)
{
        spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(birth <= tx->tx_txg);
        ASSERT(!ds->ds_is_snapshot);

        if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
                spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
        } else {
                blkptr_t fakebp;
                dva_t *dva = &fakebp.blk_dva[0];

                mutex_enter(&ds->ds_remap_deadlist_lock);
                if (!dsl_dataset_remap_deadlist_exists(ds)) {
                        dsl_dataset_create_remap_deadlist(ds, tx);
                }
                mutex_exit(&ds->ds_remap_deadlist_lock);

                BP_ZERO(&fakebp);
                fakebp.blk_birth = birth;
                DVA_SET_VDEV(dva, vdev);
                DVA_SET_OFFSET(dva, offset);
                DVA_SET_ASIZE(dva, size);

                dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, tx);
        }
}

int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
        int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
        int compressed = BP_GET_PSIZE(bp);
        int uncompressed = BP_GET_UCSIZE(bp);

        if (BP_IS_HOLE(bp))
                return (0);

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(bp->blk_birth <= tx->tx_txg);

        if (ds == NULL) {
                dsl_free(tx->tx_pool, tx->tx_txg, bp);
                dsl_pool_mos_diduse_space(tx->tx_pool,
                    -used, -compressed, -uncompressed);
                return (used);
        }
        ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

        ASSERT(!ds->ds_is_snapshot);
        dmu_buf_will_dirty(ds->ds_dbuf, tx);

        if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
                int64_t delta;

                dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
                dsl_free(tx->tx_pool, tx->tx_txg, bp);

                mutex_enter(&ds->ds_lock);
                ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
                    !DS_UNIQUE_IS_ACCURATE(ds));
                delta = parent_delta(ds, -used);
                dsl_dataset_phys(ds)->ds_unique_bytes -= used;
                mutex_exit(&ds->ds_lock);
                dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
                    delta, -compressed, -uncompressed, tx);
                dsl_dir_transfer_space(ds->ds_dir, -used - delta,
                    DD_USED_REFRSRV, DD_USED_HEAD, tx);
        } else {
                dprintf_bp(bp, "putting on dead list: %s", "");
                if (async) {
                        /*
                         * We are here as part of zio's write done callback,
                         * which means we're a zio interrupt thread.  We can't
                         * call dsl_deadlist_insert() now because it may block
                         * waiting for I/O.  Instead, put bp on the deferred
                         * queue and let dsl_pool_sync() finish the job.
                         */
                        bplist_append(&ds->ds_pending_deadlist, bp);
                } else {
                        dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
                }
                ASSERT3U(ds->ds_prev->ds_object, ==,
                    dsl_dataset_phys(ds)->ds_prev_snap_obj);
                ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
                /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
                if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
                    ds->ds_object && bp->blk_birth >
                    dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
                        dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                        mutex_enter(&ds->ds_prev->ds_lock);
                        dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
                        mutex_exit(&ds->ds_prev->ds_lock);
                }
                if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
                        dsl_dir_transfer_space(ds->ds_dir, used,
                            DD_USED_HEAD, DD_USED_SNAP, tx);
                }
        }

        mutex_enter(&ds->ds_lock);
        ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
        dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
        ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
        dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
        ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
        dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
        mutex_exit(&ds->ds_lock);

        return (used);
}
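
/*
 * Example of the branching above (illustrative note, not in the
 * original source): a block born after the most recent snapshot is
 * freed immediately and its space returned; a block born before it is
 * still referenced by that snapshot, so it goes on the deadlist
 * instead and only the head dataset's accounting is adjusted.
 */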

/*
 * We have to release the fsid synchronously or we risk that a subsequent
 * mount of the same dataset will fail to unique_insert the fsid.  This
 * failure would manifest itself as the fsid of this dataset changing
 * between mounts which makes NFS clients quite unhappy.
 */
static void
dsl_dataset_evict_sync(void *dbu)
{
        dsl_dataset_t *ds = dbu;

        ASSERT(ds->ds_owner == NULL);

        unique_remove(ds->ds_fsid_guid);
}

static void
dsl_dataset_evict_async(void *dbu)
{
        dsl_dataset_t *ds = dbu;

        ASSERT(ds->ds_owner == NULL);

        ds->ds_dbuf = NULL;

        if (ds->ds_objset != NULL)
                dmu_objset_evict(ds->ds_objset);

        if (ds->ds_prev) {
                dsl_dataset_rele(ds->ds_prev, ds);
                ds->ds_prev = NULL;
        }

        bplist_destroy(&ds->ds_pending_deadlist);
        if (dsl_deadlist_is_open(&ds->ds_deadlist))
                dsl_deadlist_close(&ds->ds_deadlist);
        if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
                dsl_deadlist_close(&ds->ds_remap_deadlist);
        if (ds->ds_dir)
                dsl_dir_async_rele(ds->ds_dir, ds);

        ASSERT(!list_link_active(&ds->ds_synced_link));

        list_destroy(&ds->ds_prop_cbs);
        list_destroy(&ds->ds_sendstreams);
        mutex_destroy(&ds->ds_lock);
        mutex_destroy(&ds->ds_opening_lock);
        mutex_destroy(&ds->ds_sendstream_lock);
        mutex_destroy(&ds->ds_remap_deadlist_lock);
        refcount_destroy(&ds->ds_longholds);
        rrw_destroy(&ds->ds_bp_rwlock);

        kmem_free(ds, sizeof (dsl_dataset_t));
}

int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
        dsl_dataset_phys_t *headphys;
        int err;
        dmu_buf_t *headdbuf;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        objset_t *mos = dp->dp_meta_objset;

        if (ds->ds_snapname[0])
                return (0);
        if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
                return (0);

        err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
            FTAG, &headdbuf);
        if (err != 0)
                return (err);
        headphys = headdbuf->db_data;
        err = zap_value_search(dp->dp_meta_objset,
            headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
        if (err != 0 && zfs_recover == B_TRUE) {
                err = 0;
                (void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
                    "SNAPOBJ=%llu-ERR=%d",
                    (unsigned long long)ds->ds_object, err);
        }
        dmu_buf_rele(headdbuf, FTAG);
        return (err);
}

int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
        matchtype_t mt = 0;
        int err;

        if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
                mt = MT_NORMALIZE;

        err = zap_lookup_norm(mos, snapobj, name, 8, 1,
            value, mt, NULL, 0, NULL);
        if (err == ENOTSUP && (mt & MT_NORMALIZE))
                err = zap_lookup(mos, snapobj, name, 8, 1, value);
        return (err);
}

int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
    boolean_t adj_cnt)
{
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
        matchtype_t mt = 0;
        int err;

        dsl_dir_snap_cmtime_update(ds->ds_dir);

        if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
                mt = MT_NORMALIZE;

        err = zap_remove_norm(mos, snapobj, name, mt, tx);
        if (err == ENOTSUP && (mt & MT_NORMALIZE))
                err = zap_remove(mos, snapobj, name, tx);

        if (err == 0 && adj_cnt)
                dsl_fs_ss_count_adjust(ds->ds_dir, -1,
                    DD_FIELD_SNAPSHOT_COUNT, tx);

        return (err);
}

boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
{
        dmu_buf_t *dbuf = ds->ds_dbuf;
        boolean_t result = B_FALSE;

        if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
            ds->ds_object, DMU_BONUS_BLKID, tag)) {
                if (ds == dmu_buf_get_user(dbuf))
                        result = B_TRUE;
                else
                        dmu_buf_rele(dbuf, tag);
        }

        return (result);
}

int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
    ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
{
        objset_t *mos = dp->dp_meta_objset;
        dmu_buf_t *dbuf;
        dsl_dataset_t *ds;
        int err;
        dmu_object_info_t doi;

        ASSERT(dsl_pool_config_held(dp));

        err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
        if (err != 0)
                return (err);

        /* Make sure dsobj has the correct object type. */
        dmu_object_info_from_db(dbuf, &doi);
        if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
                dmu_buf_rele(dbuf, tag);
                return (SET_ERROR(EINVAL));
        }

        ds = dmu_buf_get_user(dbuf);
        if (ds == NULL) {
                dsl_dataset_t *winner = NULL;

                ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
                ds->ds_dbuf = dbuf;
                ds->ds_object = dsobj;
                ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
                list_link_init(&ds->ds_synced_link);

                err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
                    NULL, ds, &ds->ds_dir);
                if (err != 0) {
                        kmem_free(ds, sizeof (dsl_dataset_t));
                        dmu_buf_rele(dbuf, tag);
                        return (err);
                }

                mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
                mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
                mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
                mutex_init(&ds->ds_remap_deadlist_lock,
                    NULL, MUTEX_DEFAULT, NULL);
                rrw_init(&ds->ds_bp_rwlock, B_FALSE);
                refcount_create(&ds->ds_longholds);

                bplist_create(&ds->ds_pending_deadlist);

                list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
                    offsetof(dmu_sendarg_t, dsa_link));

                list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
                    offsetof(dsl_prop_cb_record_t, cbr_ds_node));

                if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
                        spa_feature_t f;

                        for (f = 0; f < SPA_FEATURES; f++) {
                                if (!(spa_feature_table[f].fi_flags &
                                    ZFEATURE_FLAG_PER_DATASET))
                                        continue;
                                err = zap_contains(mos, dsobj,
                                    spa_feature_table[f].fi_guid);
                                if (err == 0) {
                                        ds->ds_feature_inuse[f] = B_TRUE;
                                } else {
                                        ASSERT3U(err, ==, ENOENT);
                                        err = 0;
                                }
                        }
                }

                if (!ds->ds_is_snapshot) {
                        ds->ds_snapname[0] = '\0';
                        if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
                                err = dsl_dataset_hold_obj(dp,
                                    dsl_dataset_phys(ds)->ds_prev_snap_obj,
                                    ds, &ds->ds_prev);
                        }
                        if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
                                int zaperr = zap_lookup(mos, ds->ds_object,
                                    DS_FIELD_BOOKMARK_NAMES,
                                    sizeof (ds->ds_bookmarks), 1,
                                    &ds->ds_bookmarks);
                                if (zaperr != ENOENT)
                                        VERIFY0(zaperr);
                        }
                } else {
                        if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
                                err = dsl_dataset_get_snapname(ds);
                        if (err == 0 &&
                            dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
                                err = zap_count(
                                    ds->ds_dir->dd_pool->dp_meta_objset,
                                    dsl_dataset_phys(ds)->ds_userrefs_obj,
                                    &ds->ds_userrefs);
                        }
                }

                if (err == 0 && !ds->ds_is_snapshot) {
                        err = dsl_prop_get_int_ds(ds,
                            zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
                            &ds->ds_reserved);
                        if (err == 0) {
                                err = dsl_prop_get_int_ds(ds,
                                    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
                                    &ds->ds_quota);
                        }
                } else {
                        ds->ds_reserved = ds->ds_quota = 0;
                }

                dsl_deadlist_open(&ds->ds_deadlist,
                    mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
                uint64_t remap_deadlist_obj =
                    dsl_dataset_get_remap_deadlist_object(ds);
                if (remap_deadlist_obj != 0) {
                        dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
                            remap_deadlist_obj);
                }

                dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
                    dsl_dataset_evict_async, &ds->ds_dbuf);
                if (err == 0)
                        winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);

                if (err != 0 || winner != NULL) {
                        bplist_destroy(&ds->ds_pending_deadlist);
                        dsl_deadlist_close(&ds->ds_deadlist);
                        if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
                                dsl_deadlist_close(&ds->ds_remap_deadlist);
                        if (ds->ds_prev)
                                dsl_dataset_rele(ds->ds_prev, ds);
                        dsl_dir_rele(ds->ds_dir, ds);
                        mutex_destroy(&ds->ds_lock);
                        mutex_destroy(&ds->ds_opening_lock);
                        mutex_destroy(&ds->ds_sendstream_lock);
                        refcount_destroy(&ds->ds_longholds);
                        kmem_free(ds, sizeof (dsl_dataset_t));
                        if (err != 0) {
                                dmu_buf_rele(dbuf, tag);
                                return (err);
                        }
                        ds = winner;
                } else {
                        ds->ds_fsid_guid =
                            unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
                        if (ds->ds_fsid_guid !=
                            dsl_dataset_phys(ds)->ds_fsid_guid) {
                                zfs_dbgmsg("ds_fsid_guid changed from "
                                    "%llx to %llx for pool %s dataset id %llu",
                                    (long long)
                                    dsl_dataset_phys(ds)->ds_fsid_guid,
                                    (long long)ds->ds_fsid_guid,
                                    spa_name(dp->dp_spa),
                                    dsobj);
                        }
                }
        }

        ASSERT3P(ds->ds_dbuf, ==, dbuf);
        ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
        ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
            spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
            dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);

        *dsp = ds;

        if ((flags & DS_HOLD_FLAG_DECRYPT) && ds->ds_dir->dd_crypto_obj != 0) {
                err = spa_keystore_create_mapping(dp->dp_spa, ds, ds);
                if (err != 0) {
                        dsl_dataset_rele(ds, tag);
                        return (SET_ERROR(EACCES));
                }
        }

        return (0);
}

int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
        return (dsl_dataset_hold_obj_flags(dp, dsobj, 0, tag, dsp));
}

int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
    void *tag, dsl_dataset_t **dsp)
{
        dsl_dir_t *dd;
        const char *snapname;
        uint64_t obj;
        int err = 0;
        dsl_dataset_t *ds;

        err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
        if (err != 0)
                return (err);

        ASSERT(dsl_pool_config_held(dp));
        obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
        if (obj != 0)
                err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
        else
                err = SET_ERROR(ENOENT);

        /* we may be looking for a snapshot */
        if (err == 0 && snapname != NULL) {
                dsl_dataset_t *snap_ds;

                if (*snapname++ != '@') {
                        dsl_dataset_rele_flags(ds, flags, tag);
                        dsl_dir_rele(dd, FTAG);
                        return (SET_ERROR(ENOENT));
                }

                dprintf("looking for snapshot '%s'\n", snapname);
                err = dsl_dataset_snap_lookup(ds, snapname, &obj);
                if (err == 0) {
                        err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
                            &snap_ds);
                }
                dsl_dataset_rele_flags(ds, flags, tag);

                if (err == 0) {
                        mutex_enter(&snap_ds->ds_lock);
                        if (snap_ds->ds_snapname[0] == 0)
                                (void) strlcpy(snap_ds->ds_snapname, snapname,
                                    sizeof (snap_ds->ds_snapname));
                        mutex_exit(&snap_ds->ds_lock);
                        ds = snap_ds;
                }
        }
        if (err == 0)
                *dsp = ds;
        dsl_dir_rele(dd, FTAG);
        return (err);
}

int
dsl_dataset_hold(dsl_pool_t *dp, const char *name, void *tag,
    dsl_dataset_t **dsp)
{
        return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
}

int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
    void *tag, dsl_dataset_t **dsp)
{
        int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
        if (err != 0)
                return (err);
        if (!dsl_dataset_tryown(*dsp, tag)) {
                dsl_dataset_rele_flags(*dsp, flags, tag);
                *dsp = NULL;
                return (SET_ERROR(EBUSY));
        }
        return (0);
}

int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
    void *tag, dsl_dataset_t **dsp)
{
        int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
        if (err != 0)
                return (err);
        if (!dsl_dataset_tryown(*dsp, tag)) {
                dsl_dataset_rele_flags(*dsp, flags, tag);
                return (SET_ERROR(EBUSY));
        }
        return (0);
}

/*
 * See the comment above dsl_pool_hold() for details.  In summary, a long
 * hold is used to prevent destruction of a dataset while the pool hold
 * is dropped, allowing other concurrent operations (e.g. spa_sync()).
 *
 * The dataset and pool must be held when this function is called.  After it
 * is called, the pool hold may be released while the dataset is still held
 * and accessed.
 */
void
dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
{
        ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
        (void) refcount_add(&ds->ds_longholds, tag);
}

void
dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
{
        (void) refcount_remove(&ds->ds_longholds, tag);
}

/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
        return (!refcount_is_zero(&ds->ds_longholds));
}
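
/*
 * Usage sketch (illustrative note, not in the original source):
 *
 *	dsl_pool_hold(name, FTAG, &dp);
 *	dsl_dataset_hold(dp, name, FTAG, &ds);
 *	dsl_dataset_long_hold(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 *	... operate on ds without the pool config lock held ...
 *	dsl_dataset_long_rele(ds, FTAG);
 *	dsl_dataset_rele(ds, FTAG);
 *
 * The long hold keeps the dataset from being destroyed while the pool
 * hold is dropped.
 */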

void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
        if (ds == NULL) {
                (void) strcpy(name, "mos");
        } else {
                dsl_dir_name(ds->ds_dir, name);
                VERIFY0(dsl_dataset_get_snapname(ds));
                if (ds->ds_snapname[0]) {
                        VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
                            <, ZFS_MAX_DATASET_NAME_LEN);
                        /*
                         * We use a "recursive" mutex so that we
                         * can call dprintf_ds() with ds_lock held.
                         */
                        if (!MUTEX_HELD(&ds->ds_lock)) {
                                mutex_enter(&ds->ds_lock);
                                VERIFY3U(strlcat(name, ds->ds_snapname,
                                    ZFS_MAX_DATASET_NAME_LEN), <,
                                    ZFS_MAX_DATASET_NAME_LEN);
                                mutex_exit(&ds->ds_lock);
                        } else {
                                VERIFY3U(strlcat(name, ds->ds_snapname,
                                    ZFS_MAX_DATASET_NAME_LEN), <,
                                    ZFS_MAX_DATASET_NAME_LEN);
                        }
                }
        }
}

int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
        VERIFY0(dsl_dataset_get_snapname(ds));
        mutex_enter(&ds->ds_lock);
        int len = strlen(ds->ds_snapname);
        /* add '@' if ds is a snap */
        if (len > 0)
                len++;
        len += dsl_dir_namelen(ds->ds_dir);
        mutex_exit(&ds->ds_lock);
        return (len);
}

void
dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
{
        if (ds->ds_dir != NULL && ds->ds_dir->dd_crypto_obj != 0 &&
            (flags & DS_HOLD_FLAG_DECRYPT)) {
                (void) spa_keystore_remove_mapping(ds->ds_dir->dd_pool->dp_spa,
                    ds->ds_object, ds);
        }

        dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
        dsl_dataset_rele_flags(ds, 0, tag);
}

void
dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
{
        ASSERT3P(ds->ds_owner, ==, tag);
        ASSERT(ds->ds_dbuf != NULL);

        mutex_enter(&ds->ds_lock);
        ds->ds_owner = NULL;
        mutex_exit(&ds->ds_lock);
        dsl_dataset_long_rele(ds, tag);
        dsl_dataset_rele_flags(ds, flags, tag);
}

boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, void *tag)
{
        boolean_t gotit = FALSE;

        ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
        mutex_enter(&ds->ds_lock);
        if (ds->ds_owner == NULL && !DS_IS_INCONSISTENT(ds)) {
                ds->ds_owner = tag;
                dsl_dataset_long_hold(ds, tag);
                gotit = TRUE;
        }
        mutex_exit(&ds->ds_lock);
        return (gotit);
}

boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
        boolean_t rv;
        mutex_enter(&ds->ds_lock);
        rv = (ds->ds_owner != NULL);
        mutex_exit(&ds->ds_lock);
        return (rv);
}

static void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, dmu_tx_t *tx)
{
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;
        objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
        uint64_t zero = 0;

        VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);

        spa_feature_incr(spa, f, tx);
        dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);

        VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
            sizeof (zero), 1, &zero, tx));
}

void
dsl_dataset_deactivate_feature(uint64_t dsobj, spa_feature_t f, dmu_tx_t *tx)
{
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;
        objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;

        VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);

        VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
        spa_feature_decr(spa, f, tx);
}

uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
{
        dsl_pool_t *dp = dd->dd_pool;
        dmu_buf_t *dbuf;
        dsl_dataset_phys_t *dsphys;
        uint64_t dsobj;
        objset_t *mos = dp->dp_meta_objset;

        if (origin == NULL)
                origin = dp->dp_origin_snap;

        ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
        ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);

        dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
            DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
        VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
        dmu_buf_will_dirty(dbuf, tx);
        dsphys = dbuf->db_data;
        bzero(dsphys, sizeof (dsl_dataset_phys_t));
        dsphys->ds_dir_obj = dd->dd_object;
        dsphys->ds_flags = flags;
        dsphys->ds_fsid_guid = unique_create();
        (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
            sizeof (dsphys->ds_guid));
        dsphys->ds_snapnames_zapobj =
            zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
            DMU_OT_NONE, 0, tx);
        dsphys->ds_creation_time = gethrestime_sec();
        dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

        if (origin == NULL) {
                dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
        } else {
                dsl_dataset_t *ohds; /* head of the origin snapshot */

                dsphys->ds_prev_snap_obj = origin->ds_object;
                dsphys->ds_prev_snap_txg =
                    dsl_dataset_phys(origin)->ds_creation_txg;
                dsphys->ds_referenced_bytes =
                    dsl_dataset_phys(origin)->ds_referenced_bytes;
                dsphys->ds_compressed_bytes =
                    dsl_dataset_phys(origin)->ds_compressed_bytes;
                dsphys->ds_uncompressed_bytes =
                    dsl_dataset_phys(origin)->ds_uncompressed_bytes;
                rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
                dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
                rrw_exit(&origin->ds_bp_rwlock, FTAG);

                /*
                 * Inherit flags that describe the dataset's contents
                 * (INCONSISTENT) or properties (Case Insensitive).
                 */
                dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
                    (DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);

                for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
                        if (origin->ds_feature_inuse[f])
                                dsl_dataset_activate_feature(dsobj, f, tx);
                }

                dmu_buf_will_dirty(origin->ds_dbuf, tx);
                dsl_dataset_phys(origin)->ds_num_children++;

                VERIFY0(dsl_dataset_hold_obj(dp,
                    dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
                    FTAG, &ohds));
                dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
                    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
                dsl_dataset_rele(ohds, FTAG);

                if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
                        if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
                                dsl_dataset_phys(origin)->ds_next_clones_obj =
                                    zap_create(mos,
                                    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
                        }
                        VERIFY0(zap_add_int(mos,
                            dsl_dataset_phys(origin)->ds_next_clones_obj,
                            dsobj, tx));
                }

                dmu_buf_will_dirty(dd->dd_dbuf, tx);
                dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
                if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
                        if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
                                dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
                                dsl_dir_phys(origin->ds_dir)->dd_clones =
                                    zap_create(mos,
                                    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
                        }
                        VERIFY0(zap_add_int(mos,
                            dsl_dir_phys(origin->ds_dir)->dd_clones,
                            dd->dd_object, tx));
                }
        }

        /* handle encryption */
        dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);

        if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
                dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

        dmu_buf_rele(dbuf, FTAG);

        dmu_buf_will_dirty(dd->dd_dbuf, tx);
        dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;

        return (dsobj);
}

static void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        objset_t *os;

        VERIFY0(dmu_objset_from_ds(ds, &os));
        if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
                dsl_pool_t *dp = ds->ds_dir->dd_pool;
                zio_t *zio;

                bzero(&os->os_zil_header, sizeof (os->os_zil_header));
                if (os->os_encrypted)
                        os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;

                zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
                dsl_dataset_sync(ds, zio, tx);
                VERIFY0(zio_wait(zio));

                /* dsl_dataset_sync_done will drop this reference. */
                dmu_buf_add_ref(ds->ds_dbuf, ds);
                dsl_dataset_sync_done(ds, tx);
        }
}

uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
    dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
        dsl_pool_t *dp = pdd->dd_pool;
        uint64_t dsobj, ddobj;
        dsl_dir_t *dd;

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(lastname[0] != '@');

        ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
        VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));

        dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
            flags & ~DS_CREATE_FLAG_NODIRTY, tx);

        dsl_deleg_set_create_perms(dd, tx, cr);

        /*
         * Since we're creating a new node we know it's a leaf, so we can
         * initialize the counts if the limit feature is active.
         */
        if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
                uint64_t cnt = 0;
                objset_t *os = dd->dd_pool->dp_meta_objset;

                dsl_dir_zapify(dd, tx);
                VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
                    sizeof (cnt), 1, &cnt, tx));
                VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
                    sizeof (cnt), 1, &cnt, tx));
        }

        dsl_dir_rele(dd, FTAG);

        /*
         * If we are creating a clone, make sure we zero out any stale
         * data from the origin snapshot's zil header.
         */
        if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
                dsl_dataset_t *ds;

                VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
                dsl_dataset_zero_zil(ds, tx);
                dsl_dataset_rele(ds, FTAG);
        }

        return (dsobj);
}

/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
        uint64_t mrs_used;
        uint64_t dlused, dlcomp, dluncomp;

        ASSERT(!ds->ds_is_snapshot);

        if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
                mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
        else
                mrs_used = 0;

        dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

        ASSERT3U(dlused, <=, mrs_used);
        dsl_dataset_phys(ds)->ds_unique_bytes =
            dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);

        if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
            SPA_VERSION_UNIQUE_ACCURATE)
                dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
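
/*
 * Worked example (illustrative note, not in the original source): if
 * the head references 100G, the most recent snapshot referenced 80G,
 * and 30G of that snapshot's space has since been freed (it is on the
 * deadlist), the snapshot still supplies 80G - 30G = 50G of the head,
 * so ds_unique_bytes = 100G - 50G = 50G.
 */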

void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
    dmu_tx_t *tx)
{
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        ASSERTV(uint64_t count);
        int err;

        ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
        err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
            obj, tx);
        /*
         * The err should not be ENOENT, but a bug in a previous version
         * of the code could cause upgrade_clones_cb() to not set
         * ds_next_snap_obj when it should, leading to a missing entry.
         * If we knew that the pool was created after
         * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
         * ENOENT.  However, at least we can check that we don't have
         * too many entries in the next_clones_obj even after failing to
         * remove this one.
         */
        if (err != ENOENT)
                VERIFY0(err);
        ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
            &count));
        ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}

blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
        return (&dsl_dataset_phys(ds)->ds_bp);
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
        return (ds->ds_dir->dd_pool->dp_spa);
}

void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp;

        if (ds == NULL) /* this is the meta-objset */
                return;

        ASSERT(ds->ds_objset != NULL);

        if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
                panic("dirtying snapshot!");

        /* Must not dirty a dataset in the same txg where it got snapshotted. */
        ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);

        dp = ds->ds_dir->dd_pool;
        if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
                /* up the hold count until we can be written out */
                dmu_buf_add_ref(ds->ds_dbuf, ds);
        }
}

boolean_t
dsl_dataset_is_dirty(dsl_dataset_t *ds)
{
        for (int t = 0; t < TXG_SIZE; t++) {
                if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
                    ds, t))
                        return (B_TRUE);
        }
        return (B_FALSE);
}

static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        uint64_t asize;

        if (!dmu_tx_is_syncing(tx))
                return (0);

        /*
         * If there's an fs-only reservation, any blocks that might become
         * owned by the snapshot dataset must be accommodated by space
         * outside of the reservation.
         */
        ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
        asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
        if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
                return (SET_ERROR(ENOSPC));

        /*
         * Propagate any reserved space for this snapshot to other
         * snapshot checks in this sync group.
         */
        if (asize > 0)
                dsl_dir_willuse_space(ds->ds_dir, asize, tx);

        return (0);
}
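
/*
 * Example (illustrative note, not in the original source): with a 10G
 * refreservation and 4G of unique data, taking a snapshot will move up
 * to 4G of formerly-unique blocks under the snapshot, so 4G of
 * additional space must be available outside the reservation before
 * the snapshot is allowed to proceed.
 */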

int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
    dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr)
{
        int error;
        uint64_t value;

        ds->ds_trysnap_txg = tx->tx_txg;

        if (!dmu_tx_is_syncing(tx))
                return (0);

        /*
         * We don't allow multiple snapshots of the same txg.  If there
         * is already one, try again.
         */
        if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
                return (SET_ERROR(EAGAIN));

        /*
         * Check for conflicting snapshot name.
         */
        error = dsl_dataset_snap_lookup(ds, snapname, &value);
        if (error == 0)
                return (SET_ERROR(EEXIST));
        if (error != ENOENT)
                return (error);

        /*
         * We don't allow taking snapshots of inconsistent datasets, such as
         * those into which we are currently receiving.  However, if we are
         * creating this snapshot as part of a receive, this check will be
         * executed atomically with respect to the completion of the receive
         * itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
         * case we ignore this, knowing it will be fixed up for us shortly in
         * dmu_recv_end_sync().
         */
        if (!recv && DS_IS_INCONSISTENT(ds))
                return (SET_ERROR(EBUSY));

        /*
         * Skip the check for temporary snapshots or if we have already checked
         * the counts in dsl_dataset_snapshot_check. This means we really only
         * check the count here when we're receiving a stream.
         */
        if (cnt != 0 && cr != NULL) {
                error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
                    ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr);
                if (error != 0)
                        return (error);
        }

        error = dsl_dataset_snapshot_reserve_space(ds, tx);
        if (error != 0)
                return (error);

        return (0);
}

int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_snapshot_arg_t *ddsa = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        nvpair_t *pair;
        int rv = 0;

        /*
         * Pre-compute how many total new snapshots will be created for each
         * level in the tree and below. This is needed for validating the
         * snapshot limit when either taking a recursive snapshot or when
         * taking multiple snapshots.
         *
         * The problem is that the counts are not actually adjusted when
         * we are checking, only when we finally sync. For a single snapshot,
         * this is easy, the count will increase by 1 at each node up the tree,
         * but it's more complicated for the recursive/multiple snapshot case.
         *
         * The dsl_fs_ss_limit_check function does recursively check the count
         * at each level up the tree but since it is validating each snapshot
         * independently we need to be sure that we are validating the complete
         * count for the entire set of snapshots. We do this by rolling up the
         * counts for each component of the name into an nvlist and then
         * checking each of those cases with the aggregated count.
         *
         * This approach properly handles not only the recursive snapshot
         * case (where we get all of those on the ddsa_snaps list) but also
         * the sibling case (e.g. snapshot a/b and a/c so that we will also
         * validate the limit on 'a' using a count of 2).
         *
         * We validate the snapshot names in the third loop and only report
         * name errors once.
         */
        if (dmu_tx_is_syncing(tx)) {
                char *nm;
                nvlist_t *cnt_track = NULL;
                cnt_track = fnvlist_alloc();

                nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);

                /* Rollup aggregated counts into the cnt_track list */
                for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
                    pair != NULL;
                    pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
                        char *pdelim;
                        uint64_t val;

                        (void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
                        pdelim = strchr(nm, '@');
                        if (pdelim == NULL)
                                continue;
                        *pdelim = '\0';

                        do {
                                if (nvlist_lookup_uint64(cnt_track, nm,
                                    &val) == 0) {
                                        /* update existing entry */
                                        fnvlist_add_uint64(cnt_track, nm,
                                            val + 1);
                                } else {
                                        /* add to list */
                                        fnvlist_add_uint64(cnt_track, nm, 1);
                                }

                                pdelim = strrchr(nm, '/');
                                if (pdelim != NULL)
                                        *pdelim = '\0';
                        } while (pdelim != NULL);
                }

                kmem_free(nm, MAXPATHLEN);

                /* Check aggregated counts at each level */
                for (pair = nvlist_next_nvpair(cnt_track, NULL);
                    pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
                        int error = 0;
                        char *name;
                        uint64_t cnt = 0;
                        dsl_dataset_t *ds;

                        name = nvpair_name(pair);
                        cnt = fnvpair_value_uint64(pair);
                        ASSERT(cnt > 0);

                        error = dsl_dataset_hold(dp, name, FTAG, &ds);
                        if (error == 0) {
                                error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
                                    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
                                    ddsa->ddsa_cr);
                                dsl_dataset_rele(ds, FTAG);
                        }

                        if (error != 0) {
                                if (ddsa->ddsa_errors != NULL)
                                        fnvlist_add_int32(ddsa->ddsa_errors,
                                            name, error);
                                rv = error;
                                /* only report one error for this check */
                                break;
                        }
                }
                nvlist_free(cnt_track);
        }

        for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
            pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
                int error = 0;
                dsl_dataset_t *ds;
                char *name, *atp = NULL;
                char dsname[ZFS_MAX_DATASET_NAME_LEN];

                name = nvpair_name(pair);
                if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
                        error = SET_ERROR(ENAMETOOLONG);
                if (error == 0) {
                        atp = strchr(name, '@');
                        if (atp == NULL)
                                error = SET_ERROR(EINVAL);
                        else
                                (void) strlcpy(dsname, name, atp - name + 1);
                }
                if (error == 0)
                        error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
                if (error == 0) {
                        /* passing 0/NULL skips dsl_fs_ss_limit_check */
                        error = dsl_dataset_snapshot_check_impl(ds,
                            atp + 1, tx, B_FALSE, 0, NULL);
                        dsl_dataset_rele(ds, FTAG);
                }

                if (error != 0) {
                        if (ddsa->ddsa_errors != NULL) {
                                fnvlist_add_int32(ddsa->ddsa_errors,
                                    name, error);
                        }
                        rv = error;
                }
        }

        return (rv);
}

void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
    dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        dmu_buf_t *dbuf;
        dsl_dataset_phys_t *dsphys;
        uint64_t dsobj, crtxg;
        objset_t *mos = dp->dp_meta_objset;
        ASSERTV(static zil_header_t zero_zil);
        ASSERTV(objset_t *os);

        ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

        /*
         * If we are on an old pool, the zil must not be active, in which
         * case it will be zeroed.  Usually zil_suspend() accomplishes this.
         */
        ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
            dmu_objset_from_ds(ds, &os) != 0 ||
            bcmp(&os->os_phys->os_zil_header, &zero_zil,
            sizeof (zero_zil)) == 0);

        /* Should not snapshot a dirty dataset. */
        ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
            ds, tx->tx_txg));

        dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);

        /*
         * The origin's ds_creation_txg has to be < TXG_INITIAL
         */
        if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
                crtxg = 1;
        else
                crtxg = tx->tx_txg;

        dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
            DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
        VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
        dmu_buf_will_dirty(dbuf, tx);
        dsphys = dbuf->db_data;
        bzero(dsphys, sizeof (dsl_dataset_phys_t));
        dsphys->ds_dir_obj = ds->ds_dir->dd_object;
        dsphys->ds_fsid_guid = unique_create();
        (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
            sizeof (dsphys->ds_guid));
        dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
        dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
        dsphys->ds_next_snap_obj = ds->ds_object;
        dsphys->ds_num_children = 1;
        dsphys->ds_creation_time = gethrestime_sec();
        dsphys->ds_creation_txg = crtxg;
        dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
        dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
        dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
        dsphys->ds_uncompressed_bytes =
            dsl_dataset_phys(ds)->ds_uncompressed_bytes;
        dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
        rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
        dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
        rrw_exit(&ds->ds_bp_rwlock, FTAG);
        dmu_buf_rele(dbuf, FTAG);

        for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
                if (ds->ds_feature_inuse[f])
                        dsl_dataset_activate_feature(dsobj, f, tx);
        }

        ASSERT3U(ds->ds_prev != 0, ==,
            dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
        if (ds->ds_prev) {
                uint64_t next_clones_obj =
                    dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
                ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
                    ds->ds_object ||
                    dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
                if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
                    ds->ds_object) {
                        dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                        ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
                            dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
                        dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
                } else if (next_clones_obj != 0) {
                        dsl_dataset_remove_from_next_clones(ds->ds_prev,
                            dsphys->ds_next_snap_obj, tx);
                        VERIFY0(zap_add_int(mos,
                            next_clones_obj, dsobj, tx));
                }
        }

        /*
         * If we have a reference-reservation on this dataset, we will
         * need to increase the amount of refreservation being charged
         * since our unique space is going to zero.
         */
        if (ds->ds_reserved) {
                int64_t delta;
                ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
                delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
                    ds->ds_reserved);
                dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
                    delta, 0, 0, tx);
        }

        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        dsl_dataset_phys(ds)->ds_deadlist_obj =
            dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
            dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
        dsl_deadlist_close(&ds->ds_deadlist);
        dsl_deadlist_open(&ds->ds_deadlist, mos,
            dsl_dataset_phys(ds)->ds_deadlist_obj);
        dsl_deadlist_add_key(&ds->ds_deadlist,
            dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);

        if (dsl_dataset_remap_deadlist_exists(ds)) {
                uint64_t remap_deadlist_obj =
                    dsl_dataset_get_remap_deadlist_object(ds);
                /*
                 * Move the remap_deadlist to the snapshot.  The head
                 * will create a new remap deadlist on demand, from
                 * dsl_dataset_block_remapped().
                 */
                dsl_dataset_unset_remap_deadlist_object(ds, tx);
                dsl_deadlist_close(&ds->ds_remap_deadlist);

                dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
                VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
                    sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
        }

        ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
        dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
        dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
        dsl_dataset_phys(ds)->ds_unique_bytes = 0;

        if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
                dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

        VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
            snapname, 8, 1, &dsobj, tx));

        if (ds->ds_prev)
                dsl_dataset_rele(ds->ds_prev, ds);
        VERIFY0(dsl_dataset_hold_obj(dp,
            dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));

        dsl_scan_ds_snapshotted(ds, tx);

        dsl_dir_snap_cmtime_update(ds->ds_dir);

        spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, "");
}

void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_snapshot_arg_t *ddsa = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        nvpair_t *pair;

        for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
            pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
                dsl_dataset_t *ds;
                char *name, *atp;
                char dsname[ZFS_MAX_DATASET_NAME_LEN];

                name = nvpair_name(pair);
                atp = strchr(name, '@');
                (void) strlcpy(dsname, name, atp - name + 1);
                VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));

                dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
                if (ddsa->ddsa_props != NULL) {
                        dsl_props_set_sync_impl(ds->ds_prev,
                            ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
                }
                zvol_create_minors(dp->dp_spa, nvpair_name(pair), B_TRUE);
                dsl_dataset_rele(ds, FTAG);
        }
}

/*
 * The snapshots must all be in the same pool.
 * All-or-nothing: if there are any failures, nothing will be modified.
 */
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
        dsl_dataset_snapshot_arg_t ddsa;
        nvpair_t *pair;
        boolean_t needsuspend;
        int error;
        spa_t *spa;
        char *firstname;
        nvlist_t *suspended = NULL;

        pair = nvlist_next_nvpair(snaps, NULL);
        if (pair == NULL)
                return (0);
        firstname = nvpair_name(pair);

        error = spa_open(firstname, &spa, FTAG);
        if (error != 0)
                return (error);
        needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
        spa_close(spa, FTAG);

        if (needsuspend) {
                suspended = fnvlist_alloc();
                for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
                    pair = nvlist_next_nvpair(snaps, pair)) {
                        char fsname[ZFS_MAX_DATASET_NAME_LEN];
                        char *snapname = nvpair_name(pair);
                        char *atp;
                        void *cookie;

                        atp = strchr(snapname, '@');
                        if (atp == NULL) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }
                        (void) strlcpy(fsname, snapname, atp - snapname + 1);

                        error = zil_suspend(fsname, &cookie);
                        if (error != 0)
                                break;
                        fnvlist_add_uint64(suspended, fsname,
                            (uintptr_t)cookie);
                }
        }

        ddsa.ddsa_snaps = snaps;
        ddsa.ddsa_props = props;
        ddsa.ddsa_errors = errors;
        ddsa.ddsa_cr = CRED();

        if (error == 0) {
                error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
                    dsl_dataset_snapshot_sync, &ddsa,
                    fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
        }

        if (suspended != NULL) {
                for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
                    pair = nvlist_next_nvpair(suspended, pair)) {
                        zil_resume((void *)(uintptr_t)
                            fnvpair_value_uint64(pair));
                }
                fnvlist_free(suspended);
        }

        return (error);
}
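
/*
 * Usage sketch (illustrative note, not in the original source): a
 * caller such as the snapshot ioctl path builds an nvlist whose pair
 * names are the full snapshot names and invokes the sync task, e.g.:
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "pool/fs@today");
 *	fnvlist_add_boolean(snaps, "pool/fs2@today");
 *	error = dsl_dataset_snapshot(snaps, NULL, NULL);
 *	fnvlist_free(snaps);
 *
 * Either every snapshot in the list is created or none are.
 */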

typedef struct dsl_dataset_snapshot_tmp_arg {
        const char *ddsta_fsname;
        const char *ddsta_snapname;
        minor_t ddsta_cleanup_minor;
        const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;

static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dataset_t *ds;
        int error;

        error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
        if (error != 0)
                return (error);

        /* NULL cred means no limit check for tmp snapshot */
        error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
            tx, B_FALSE, 0, NULL);
        if (error != 0) {
                dsl_dataset_rele(ds, FTAG);
                return (error);
        }

        if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
            B_TRUE, tx);
        if (error != 0) {
                dsl_dataset_rele(ds, FTAG);
                return (error);
        }

        dsl_dataset_rele(ds, FTAG);
        return (0);
}

static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dataset_t *ds = NULL;

        VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));

        dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
        dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
            ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
        dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);

        dsl_dataset_rele(ds, FTAG);
}

int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
    minor_t cleanup_minor, const char *htag)
{
        dsl_dataset_snapshot_tmp_arg_t ddsta;
        int error;
        spa_t *spa;
        boolean_t needsuspend;
        void *cookie;

        ddsta.ddsta_fsname = fsname;
        ddsta.ddsta_snapname = snapname;
        ddsta.ddsta_cleanup_minor = cleanup_minor;
        ddsta.ddsta_htag = htag;

        error = spa_open(fsname, &spa, FTAG);
        if (error != 0)
                return (error);
        needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
        spa_close(spa, FTAG);

        if (needsuspend) {
                error = zil_suspend(fsname, &cookie);
                if (error != 0)
                        return (error);
        }

        error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
            dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);

        if (needsuspend)
                zil_resume(cookie);
        return (error);
}

void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(ds->ds_objset != NULL);
        ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);

        /*
         * in case we had to change ds_fsid_guid when we opened it,
         * sync it out now.
         */
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;

        if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
                VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
                    ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
                    &ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
                VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
                    ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
                    &ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
                VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
                    ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
                    &ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
                ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
                ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
                ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
        }

        dmu_objset_sync(ds->ds_objset, zio, tx);

        for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
                if (ds->ds_feature_activation_needed[f]) {
                        if (ds->ds_feature_inuse[f])
                                continue;
                        dsl_dataset_activate_feature(ds->ds_object, f, tx);
                        ds->ds_feature_inuse[f] = B_TRUE;
                }
        }
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
        dsl_deadlist_t *dl = arg;
        dsl_deadlist_insert(dl, bp, tx);
        return (0);
}

void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        objset_t *os = ds->ds_objset;

        bplist_iterate(&ds->ds_pending_deadlist,
            deadlist_enqueue_cb, &ds->ds_deadlist, tx);

        if (os->os_synced_dnodes != NULL) {
                multilist_destroy(os->os_synced_dnodes);
                os->os_synced_dnodes = NULL;
        }

        ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));

        dmu_buf_rele(ds->ds_dbuf, ds);
}

static int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
        uint64_t count = 0;
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        zap_cursor_t zc;
        zap_attribute_t za;

        ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));

        /*
         * There may be missing entries in ds_next_clones_obj
         * due to a bug in a previous version of the code.
         * Only trust it if it has the right number of entries.
         */
        if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
                VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
                    &count));
        }
        if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
                return (SET_ERROR(ENOENT));
        }
        for (zap_cursor_init(&zc, mos,
            dsl_dataset_phys(ds)->ds_next_clones_obj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                dsl_dataset_t *clone;
                char buf[ZFS_MAX_DATASET_NAME_LEN];
                VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
                    za.za_first_integer, FTAG, &clone));
                dsl_dir_name(clone->ds_dir, buf);
                fnvlist_add_boolean(val, buf);
                dsl_dataset_rele(clone, FTAG);
        }
        zap_cursor_fini(&zc);
        return (0);
}

static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
        nvlist_t *propval = fnvlist_alloc();
        nvlist_t *val;

        /*
         * We use nvlist_alloc() instead of fnvlist_alloc() because the
         * latter would allocate the list with NV_UNIQUE_NAME flag.
         * As a result, every time a clone name is appended to the list
         * it would be (linearly) searched for a duplicate name.
         * We already know that all clone names must be unique and we
         * want to avoid the quadratic complexity of double-checking that
         * because we can have a large number of clones.
         */
        VERIFY0(nvlist_alloc(&val, 0, KM_SLEEP));

        if (get_clones_stat_impl(ds, val) == 0) {
                fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
                fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
                    propval);
        }

        nvlist_free(val);
        nvlist_free(propval);
}

/*
 * Returns a string that represents the receive resume stats token. It should
 * be freed with strfree().
 */
static char *
get_receive_resume_stats_impl(dsl_dataset_t *ds)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dsl_dataset_has_resume_receive_state(ds)) {
                char *str;
                void *packed;
                uint8_t *compressed;
                uint64_t val;
                nvlist_t *token_nv = fnvlist_alloc();
                size_t packed_size, compressed_size;

                if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
                        fnvlist_add_uint64(token_nv, "fromguid", val);
                }
                if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
                        fnvlist_add_uint64(token_nv, "object", val);
                }
                if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
                        fnvlist_add_uint64(token_nv, "offset", val);
                }
                if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
                        fnvlist_add_uint64(token_nv, "bytes", val);
                }
                if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
                        fnvlist_add_uint64(token_nv, "toguid", val);
                }
                char buf[MAXNAMELEN];
                if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
                        fnvlist_add_string(token_nv, "toname", buf);
                }
                if (zap_contains(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_LARGEBLOCK) == 0) {
                        fnvlist_add_boolean(token_nv, "largeblockok");
                }
                if (zap_contains(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_EMBEDOK) == 0) {
                        fnvlist_add_boolean(token_nv, "embedok");
                }
                if (zap_contains(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_COMPRESSOK) == 0) {
                        fnvlist_add_boolean(token_nv, "compressok");
                }
                if (zap_contains(dp->dp_meta_objset, ds->ds_object,
                    DS_FIELD_RESUME_RAWOK) == 0) {
                        fnvlist_add_boolean(token_nv, "rawok");
                }
                packed = fnvlist_pack(token_nv, &packed_size);
                fnvlist_free(token_nv);
                compressed = kmem_alloc(packed_size, KM_SLEEP);

                compressed_size = gzip_compress(packed, compressed,
                    packed_size, packed_size, 6);

                zio_cksum_t cksum;
                fletcher_4_native_varsize(compressed, compressed_size, &cksum);

                str = kmem_alloc(compressed_size * 2 + 1, KM_SLEEP);
                for (int i = 0; i < compressed_size; i++) {
                        (void) sprintf(str + i * 2, "%02x", compressed[i]);
                }
                str[compressed_size * 2] = '\0';
                char *propval = kmem_asprintf("%u-%llx-%llx-%s",
                    ZFS_SEND_RESUME_TOKEN_VERSION,
                    (longlong_t)cksum.zc_word[0],
                    (longlong_t)packed_size, str);
                kmem_free(packed, packed_size);
                kmem_free(str, compressed_size * 2 + 1);
                kmem_free(compressed, packed_size);
                return (propval);
        }
        return (strdup(""));
}
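
/*
 * Token format example (illustrative note, not in the original
 * source): the generated string looks like "1-abcd1234-1f4-<hex>",
 * i.e. the token version, the first fletcher-4 checksum word, the
 * packed nvlist size, and the gzip-compressed nvlist rendered as hex
 * digits, all in the "%u-%llx-%llx-%s" format used above.
 */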

/*
 * Returns a string that represents the receive resume stats token of the
 * dataset's child. It should be freed with strfree().
 */
static char *
get_child_receive_stats(dsl_dataset_t *ds)
{
        char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
        dsl_dataset_t *recv_ds;
        dsl_dataset_name(ds, recvname);
        if (strlcat(recvname, "/", sizeof (recvname)) <
            sizeof (recvname) &&
            strlcat(recvname, recv_clone_name, sizeof (recvname)) <
            sizeof (recvname) &&
            dsl_dataset_hold(ds->ds_dir->dd_pool, recvname, FTAG,
            &recv_ds) == 0) {
                char *propval = get_receive_resume_stats_impl(recv_ds);
                dsl_dataset_rele(recv_ds, FTAG);
                return (propval);
        }
        return (strdup(""));
}

static void
get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
        char *propval = get_receive_resume_stats_impl(ds);
        if (strcmp(propval, "") != 0) {
                dsl_prop_nvlist_add_string(nv,
                    ZFS_PROP_RECEIVE_RESUME_TOKEN, propval);
        } else {
                char *childval = get_child_receive_stats(ds);
                if (strcmp(childval, "") != 0) {
                        dsl_prop_nvlist_add_string(nv,
                            ZFS_PROP_RECEIVE_RESUME_TOKEN, childval);
                }
                strfree(childval);
        }
        strfree(propval);
}

uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
        uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
            (dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
            dsl_dataset_phys(ds)->ds_compressed_bytes);
        return (ratio);
}
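
/*
 * Example (illustrative note, not in the original source): 1000
 * uncompressed bytes stored in 400 compressed bytes yields
 * 1000 * 100 / 400 = 250, which user space renders as a 2.50x
 * compression ratio.
 */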

uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
        return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}

uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
        if (ds->ds_is_snapshot) {
                return (dsl_get_refratio(ds));
        } else {
                dsl_dir_t *dd = ds->ds_dir;
                mutex_enter(&dd->dd_lock);
                uint64_t val = dsl_dir_get_compressratio(dd);
                mutex_exit(&dd->dd_lock);
                return (val);
        }
}

uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
        if (ds->ds_is_snapshot) {
                return (dsl_dataset_phys(ds)->ds_unique_bytes);
        } else {
                dsl_dir_t *dd = ds->ds_dir;
                mutex_enter(&dd->dd_lock);
                uint64_t val = dsl_dir_get_used(dd);
                mutex_exit(&dd->dd_lock);
                return (val);
        }
}

uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
        return (dsl_dataset_phys(ds)->ds_creation_time);
}

uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
        return (dsl_dataset_phys(ds)->ds_creation_txg);
}

uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
        return (ds->ds_quota);
}

uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
        return (ds->ds_reserved);
}

uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
        return (dsl_dataset_phys(ds)->ds_guid);
}

uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
        return (dsl_dataset_phys(ds)->ds_unique_bytes);
}

uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
        return (ds->ds_object);
}

uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
        return (ds->ds_userrefs);
}

uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
        return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}

uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
        return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}
2111 dsl_get_numclones(dsl_dataset_t
*ds
)
2113 ASSERT(ds
->ds_is_snapshot
);
2114 return (dsl_dataset_phys(ds
)->ds_num_children
- 1);
2118 dsl_get_inconsistent(dsl_dataset_t
*ds
)
2120 return ((dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_INCONSISTENT
) ?

uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
	uint64_t refdbytes = dsl_get_referenced(ds);
	uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
	    NULL, 0, TRUE);
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		availbytes +=
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
	}
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (refdbytes < ds->ds_quota) {
			availbytes = MIN(availbytes,
			    ds->ds_quota - refdbytes);
		} else {
			availbytes = 0;
		}
	}
	return (availbytes);
}
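
/*
 * Worked example for the refquota adjustment above (illustrative values):
 * with 10G of pool space available, refquota=1G and 800M already
 * referenced, the reported "available" is MIN(10G, 1G - 800M) = 200M;
 * once referenced reaches the refquota it drops to 0.
 */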

int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_dataset_t *prev;
	int err = dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
	if (err == 0) {
		uint64_t comp, uncomp;
		err = dsl_dataset_space_written(prev, ds, written,
		    &comp, &uncomp);
		dsl_dataset_rele(prev, FTAG);
	}
	return (err);
}

/*
 * 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
 */
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
		dsl_dataset_name(ds->ds_prev, snap);
		return (0);
	} else {
		return (ENOENT);
	}
}

/*
 * Returns the mountpoint property and source for the given dataset in the
 * value and source buffers.  The value buffer must be at least as large as
 * MAXPATHLEN and the source buffer at least as large as
 * ZFS_MAX_DATASET_NAME_LEN.
 * Returns 0 on success and an error on failure.
 */
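/*
 * Illustrative example (hypothetical names): if "pool/home/user" inherits
 * mountpoint=/export/home from "pool/home", the stored value is
 * "/export/home", source is "pool/home", and the relative path becomes
 * "user", so the value returned below is "/export/home/user" (prefixed by
 * any pool altroot).
 */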
int
dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
    char *source)
{
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Retrieve the mountpoint value stored in the zap object */
	error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
	    ZAP_MAXVALUELEN, value, source);
	if (error != 0) {
		return (error);
	}

	/*
	 * Process the dsname and source to find the full mountpoint string.
	 * Can be skipped for 'legacy' or 'none'.
	 */
	if (value[0] == '/') {
		char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
		char *root = buf;
		const char *relpath;

		/*
		 * If we inherit the mountpoint, even from a dataset
		 * with a received value, the source will be the path of
		 * the dataset we inherit from. If source is
		 * ZPROP_SOURCE_VAL_RECVD, the received value is not
		 * inherited.
		 */
		if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
			relpath = "";
		} else {
			ASSERT0(strncmp(dsname, source, strlen(source)));
			relpath = dsname + strlen(source);
			if (relpath[0] == '/')
				relpath++;
		}

		spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);

		/*
		 * Special case an alternate root of '/'. This will
		 * avoid having multiple leading slashes in the
		 * mountpoint path.
		 */
		if (strcmp(root, "/") == 0)
			root++;

		/*
		 * If the mountpoint is '/' then skip over this
		 * if we are obtaining either an alternate root or
		 * an inherited mountpoint.
		 */
		char *mnt = value;
		if (value[1] == '\0' && (root[0] != '\0' ||
		    relpath[0] != '\0'))
			mnt = value + 1;

		if (relpath[0] == '\0') {
			(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
			    root, mnt);
		} else {
			(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
			    root, mnt, relpath[0] == '@' ? "" : "/",
			    relpath);
		}
		kmem_free(buf, ZAP_MAXVALUELEN);
	}

	return (0);
}

void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	ASSERT(dsl_pool_config_held(dp));

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
	    dsl_get_refratio(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
	    dsl_get_logicalreferenced(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dsl_get_compressratio(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dsl_get_used(ds));

	if (ds->ds_is_snapshot) {
		get_clones_stat(ds, nv);
	} else {
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		if (dsl_get_prev_snap(ds, buf) == 0)
			dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
			    buf);
		dsl_dir_stats(ds->ds_dir, nv);
	}

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
	    dsl_get_available(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
	    dsl_get_referenced(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    dsl_get_creation(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    dsl_get_creationtxg(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    dsl_get_refquota(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    dsl_get_refreservation(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    dsl_get_guid(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    dsl_get_unique(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    dsl_get_objsetid(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    dsl_get_userrefs(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    dsl_get_defer_destroy(ds));
	dsl_dataset_crypt_stats(ds, nv);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		uint64_t written;
		if (dsl_get_written(ds, &written) == 0) {
			dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
			    written);
		}
	}

	if (!dsl_dataset_is_snapshot(ds)) {
		/*
		 * A failed "newfs" (e.g. full) resumable receive leaves
		 * the stats set on this dataset. Check here for the prop.
		 */
		get_receive_resume_stats(ds, nv);

		/*
		 * A failed incremental resumable receive leaves the
		 * stats set on our child named "%recv". Check the child
		 * for the prop.
		 */
		/* 6 extra bytes for /%recv */
		char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
		dsl_dataset_t *recv_ds;
		dsl_dataset_name(ds, recvname);
		if (strlcat(recvname, "/", sizeof (recvname)) <
		    sizeof (recvname) &&
		    strlcat(recvname, recv_clone_name, sizeof (recvname)) <
		    sizeof (recvname) &&
		    dsl_dataset_hold(dp, recvname, FTAG, &recv_ds) == 0) {
			get_receive_resume_stats(recv_ds, nv);
			dsl_dataset_rele(recv_ds, FTAG);
		}
	}
}

void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
	ASSERT(dsl_pool_config_held(dp));

	stat->dds_creation_txg = dsl_get_creationtxg(ds);
	stat->dds_inconsistent = dsl_get_inconsistent(ds);
	stat->dds_guid = dsl_get_guid(ds);
	stat->dds_origin[0] = '\0';
	if (ds->ds_is_snapshot) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = dsl_get_numclones(ds);
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;

		if (dsl_dir_is_clone(ds->ds_dir)) {
			dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
		}
	}
}

uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}

void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
		*availbytesp +=
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	*usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}

boolean_t
dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
{
	ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
	uint64_t birth;

	ASSERT(dsl_pool_config_held(dp));
	if (snap == NULL)
		return (B_FALSE);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	birth = dsl_dataset_get_blkptr(ds)->blk_birth;
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
		objset_t *os, *os_snap;
		/*
		 * It may be that only the ZIL differs, because it was
		 * reset in the head. Don't count that as being
		 * modified.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0)
			return (B_TRUE);
		if (dmu_objset_from_ds(snap, &os_snap) != 0)
			return (B_TRUE);
		return (bcmp(&os->os_phys->os_meta_dnode,
		    &os_snap->os_phys->os_meta_dnode,
		    sizeof (os->os_phys->os_meta_dnode)) != 0);
	}
	return (B_FALSE);
}

typedef struct dsl_dataset_rename_snapshot_arg {
	const char *ddrsa_fsname;
	const char *ddrsa_oldsnapname;
	const char *ddrsa_newsnapname;
	boolean_t ddrsa_recursive;
	dmu_tx_t *ddrsa_tx;
} dsl_dataset_rename_snapshot_arg_t;

static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
    dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	int error;
	uint64_t val;

	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
	if (error != 0) {
		/* ignore nonexistent snapshots */
		return (error == ENOENT ? 0 : error);
	}

	/* new name should not exist */
	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
	if (error == 0)
		error = SET_ERROR(EEXIST);
	else if (error == ENOENT)
		error = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(hds->ds_dir) + 1 +
	    strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
		error = SET_ERROR(ENAMETOOLONG);

	return (error);
}

static int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	int error;

	error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
	if (error != 0)
		return (error);

	if (ddrsa->ddrsa_recursive) {
		error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
		    dsl_dataset_rename_snapshot_check_impl, ddrsa,
		    DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
	}
	dsl_dataset_rele(hds, FTAG);
	return (error);
}

static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
    dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_dataset_t *ds;
	uint64_t val;
	dmu_tx_t *tx = ddrsa->ddrsa_tx;
	int error;

	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
	ASSERT(error == 0 || error == ENOENT);
	if (error == ENOENT) {
		/* ignore nonexistent snapshots */
		return (0);
	}

	VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));

	/* log before we change the name */
	spa_history_log_internal_ds(ds, "rename", tx,
	    "-> @%s", ddrsa->ddrsa_newsnapname);

	VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
	    B_FALSE));
	mutex_enter(&ds->ds_lock);
	(void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
	    sizeof (ds->ds_snapname));
	mutex_exit(&ds->ds_lock);
	VERIFY0(zap_add(dp->dp_meta_objset,
	    dsl_dataset_phys(hds)->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx));
	zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
	    ddrsa->ddrsa_newsnapname, B_TRUE);

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds = NULL;

	VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
	ddrsa->ddrsa_tx = tx;
	if (ddrsa->ddrsa_recursive) {
		VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
		    dsl_dataset_rename_snapshot_sync_impl, ddrsa,
		    DS_FIND_CHILDREN));
	} else {
		VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
	}
	dsl_dataset_rele(hds, FTAG);
}

int
dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
	dsl_dataset_rename_snapshot_arg_t ddrsa;

	ddrsa.ddrsa_fsname = fsname;
	ddrsa.ddrsa_oldsnapname = oldsnapname;
	ddrsa.ddrsa_newsnapname = newsnapname;
	ddrsa.ddrsa_recursive = recursive;

	return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
	    dsl_dataset_rename_snapshot_sync, &ddrsa,
	    1, ZFS_SPACE_CHECK_RESERVED));
}

/*
 * If we're doing an ownership handoff, we need to make sure that there is
 * only one long hold on the dataset.  We're not allowed to change anything
 * here so we don't permanently release the long hold or regular hold here.
 * We want to do this only when syncing to avoid the dataset unexpectedly
 * going away when we release the long hold.
 */
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
	boolean_t held;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (owner != NULL) {
		VERIFY3P(ds->ds_owner, ==, owner);
		dsl_dataset_long_rele(ds, owner);
	}

	held = dsl_dataset_long_held(ds);

	if (owner != NULL)
		dsl_dataset_long_hold(ds, owner);

	if (held)
		return (SET_ERROR(EBUSY));

	return (0);
}

static int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rollback_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int64_t unused_refres_delta;
	int error;

	error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
	if (error != 0)
		return (error);

	/* must not be a snapshot */
	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* must have a most recent snapshot */
	if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ESRCH));
	}

	/*
	 * No rollback to a snapshot created in the current txg, because
	 * the rollback may dirty the dataset and create blocks that are
	 * not reachable from the rootbp while having a birth txg that
	 * falls into the snapshot's range.
	 */
	if (dmu_tx_is_syncing(tx) &&
	    dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EAGAIN));
	}

	/*
	 * If the expected target snapshot is specified, then check that
	 * the latest snapshot is it.
	 */
	if (ddra->ddra_tosnap != NULL) {
		dsl_dataset_t *snapds;

		/* Check if the target snapshot exists at all. */
		error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
		if (error != 0) {
			/*
			 * ESRCH is used to signal that the target snapshot
			 * does not exist, while ENOENT is used to report that
			 * the rolled back dataset does not exist.
			 * ESRCH is also used to cover other cases where the
			 * target snapshot is not related to the dataset being
			 * rolled back such as being in a different pool.
			 */
			if (error == ENOENT || error == EXDEV)
				error = SET_ERROR(ESRCH);
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		ASSERT(snapds->ds_is_snapshot);

		/* Check if the snapshot is the latest snapshot indeed. */
		if (snapds != ds->ds_prev) {
			/*
			 * Distinguish between the case where the only problem
			 * is intervening snapshots (EEXIST) vs the snapshot
			 * not being a valid target for rollback (ESRCH).
			 */
			if (snapds->ds_dir == ds->ds_dir ||
			    (dsl_dir_is_clone(ds->ds_dir) &&
			    dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
			    snapds->ds_object)) {
				error = SET_ERROR(EEXIST);
			} else {
				error = SET_ERROR(ESRCH);
			}
			dsl_dataset_rele(snapds, FTAG);
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		dsl_dataset_rele(snapds, FTAG);
	}

	/* must not have any bookmarks after the most recent snapshot */
	nvlist_t *proprequest = fnvlist_alloc();
	fnvlist_add_boolean(proprequest, zfs_prop_to_name(ZFS_PROP_CREATETXG));
	nvlist_t *bookmarks = fnvlist_alloc();
	error = dsl_get_bookmarks_impl(ds, proprequest, bookmarks);
	fnvlist_free(proprequest);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}
	for (nvpair_t *pair = nvlist_next_nvpair(bookmarks, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(bookmarks, pair)) {
		nvlist_t *valuenv =
		    fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair),
		    zfs_prop_to_name(ZFS_PROP_CREATETXG));
		uint64_t createtxg = fnvlist_lookup_uint64(valuenv, "value");
		if (createtxg > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
			fnvlist_free(bookmarks);
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EEXIST));
		}
	}
	fnvlist_free(bookmarks);

	error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * Check if the snap we are rolling back to uses more than
	 * the refquota.
	 */
	if (ds->ds_quota != 0 &&
	    dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes >
	    ds->ds_quota) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * When we do the clone swap, we will temporarily use more space
	 * due to the refreservation (the head will no longer have any
	 * unique space, so the entire amount of the refreservation will need
	 * to be free).  We will immediately destroy the clone, freeing
	 * this space, but the freeing happens over many txg's.
	 */
	unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
	    dsl_dataset_phys(ds)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rollback_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds, *clone;
	uint64_t cloneobj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));

	dsl_dataset_name(ds->ds_prev, namebuf);
	fnvlist_add_string(ddra->ddra_result, "target", namebuf);

	cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
	    ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);

	VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));

	dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
	dsl_dataset_zero_zil(ds, tx);

	dsl_destroy_head_sync_impl(clone, tx);

	dsl_dataset_rele(clone, FTAG);
	dsl_dataset_rele(ds, FTAG);
}

/*
 * Rolls back the given filesystem or volume to the most recent snapshot.
 * The name of the most recent snapshot will be returned under key "target"
 * in the result nvlist.
 *
 * If owner != NULL:
 * - The existing dataset MUST be owned by the specified owner at entry
 * - Upon return, dataset will still be held by the same owner, whether we
 *   succeed or not.
 *
 * This mode is required any time the existing filesystem is mounted.  See
 * notes above zfs_suspend_fs() for further details.
 */
int
dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
    nvlist_t *result)
{
	dsl_dataset_rollback_arg_t ddra;

	ddra.ddra_fsname = fsname;
	ddra.ddra_tosnap = tosnap;
	ddra.ddra_owner = owner;
	ddra.ddra_result = result;

	return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
	    dsl_dataset_rollback_sync, &ddra,
	    1, ZFS_SPACE_CHECK_RESERVED));
}

struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
    void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);

static int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	dsl_dataset_t *origin_ds;
	int err;
	uint64_t unused;
	uint64_t ss_mv_cnt;
	size_t max_snap_len;
	boolean_t conflicting_snaps;

	err = promote_hold(ddpa, dp, FTAG);
	if (err != 0)
		return (err);

	hds = ddpa->ddpa_clone;
	max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;

	if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
		promote_rele(ddpa, FTAG);
		return (SET_ERROR(EXDEV));
	}

	snap = list_head(&ddpa->shared_snaps);
	if (snap == NULL) {
		err = SET_ERROR(ENOENT);
		goto out;
	}
	origin_ds = snap->ds;

	/*
	 * Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
	 * When doing a promote we must make sure the encryption root for
	 * both the target and the target's origin does not change to avoid
	 * needing to rewrap encryption keys
	 */
	err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
	if (err != 0)
		goto out;

	/*
	 * Compute and check the amount of space to transfer.  Since this is
	 * so expensive, don't do the preliminary check.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		promote_rele(ddpa, FTAG);
		return (0);
	}

	/* compute origin's new unique space */
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT(snap != NULL);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
	    &ddpa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used by each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
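	/*
	 * Illustrative instance of the formula above (made-up numbers):
	 * with three snapshots whose "used" values are u0=1G, u1=2G, u2=3G
	 * and killed-block totals k0=0, k1=200M, k2=100M, the space to
	 * transfer is u2 + k2 + k1 + k0 = 3G + 100M + 200M + 0 = 3.3G.
	 * If the walk stops before the ORIGIN at snapshot M, uM-1 is
	 * subtracted instead.
	 */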
	conflicting_snaps = B_FALSE;
	ss_mv_cnt = 0;
	ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
	ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
	ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		ss_mv_cnt++;

		/*
		 * If there are long holds, we won't be able to evict
		 * the objset.
		 */
		if (dsl_dataset_long_held(ds)) {
			err = SET_ERROR(EBUSY);
			goto out;
		}

		/* Check that the snapshot name does not conflict */
		VERIFY0(dsl_dataset_get_snapname(ds));
		if (strlen(ds->ds_snapname) >= max_snap_len) {
			err = SET_ERROR(ENAMETOOLONG);
			goto out;
		}
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			fnvlist_add_boolean(ddpa->err_ds,
			    snap->ds->ds_snapname);
			conflicting_snaps = B_TRUE;
		} else if (err != ENOENT) {
			goto out;
		}

		/* The very first snapshot does not have a deadlist */
		if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		ddpa->used += dlused;
		ddpa->comp += dlcomp;
		ddpa->uncomp += dluncomp;
	}

	/*
	 * In order to return the full list of conflicting snapshots, we check
	 * whether there was a conflict after traversing all of them.
	 */
	if (conflicting_snaps) {
		err = SET_ERROR(EEXIST);
		goto out;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (ddpa->origin_origin) {
		ddpa->used -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
		ddpa->comp -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
		ddpa->uncomp -=
		    dsl_dataset_phys(ddpa->origin_origin)->
		    ds_uncompressed_bytes;
	}

	/* Check that there is enough space and limit headroom here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    0, ss_mv_cnt, ddpa->used, ddpa->cr);
	if (err != 0)
		goto out;

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&ddpa->origin_snaps);
		if (snap == NULL) {
			err = SET_ERROR(ENOENT);
			goto out;
		}
		err = snaplist_space(&ddpa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
		if (err != 0)
			goto out;

		err = snaplist_space(&ddpa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err != 0)
			goto out;
		ddpa->cloneusedsnap += space;
	}
	if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
	    DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&ddpa->origin_snaps,
		    dsl_dataset_phys(origin_ds)->ds_creation_txg,
		    &ddpa->originusedsnap);
		if (err != 0)
			goto out;
	}

out:
	promote_rele(ddpa, FTAG);
	return (err);
}
, dmu_tx_t
*tx
)
3007 dsl_dataset_promote_arg_t
*ddpa
= arg
;
3008 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
3010 struct promotenode
*snap
;
3011 dsl_dataset_t
*origin_ds
;
3012 dsl_dataset_t
*origin_head
;
3014 dsl_dir_t
*odd
= NULL
;
3015 uint64_t oldnext_obj
;
3018 VERIFY0(promote_hold(ddpa
, dp
, FTAG
));
3019 hds
= ddpa
->ddpa_clone
;
3021 ASSERT0(dsl_dataset_phys(hds
)->ds_flags
& DS_FLAG_NOPROMOTE
);
3023 snap
= list_head(&ddpa
->shared_snaps
);
3024 origin_ds
= snap
->ds
;
3027 snap
= list_head(&ddpa
->origin_snaps
);
3028 origin_head
= snap
->ds
;
3031 * We need to explicitly open odd, since origin_ds's dd will be
3034 VERIFY0(dsl_dir_hold_obj(dp
, origin_ds
->ds_dir
->dd_object
,
3037 dsl_dataset_promote_crypt_sync(hds
->ds_dir
, odd
, tx
);
3039 /* change origin's next snap */
3040 dmu_buf_will_dirty(origin_ds
->ds_dbuf
, tx
);
3041 oldnext_obj
= dsl_dataset_phys(origin_ds
)->ds_next_snap_obj
;
3042 snap
= list_tail(&ddpa
->clone_snaps
);
3043 ASSERT3U(dsl_dataset_phys(snap
->ds
)->ds_prev_snap_obj
, ==,
3044 origin_ds
->ds_object
);
3045 dsl_dataset_phys(origin_ds
)->ds_next_snap_obj
= snap
->ds
->ds_object
;
3047 /* change the origin's next clone */
3048 if (dsl_dataset_phys(origin_ds
)->ds_next_clones_obj
) {
3049 dsl_dataset_remove_from_next_clones(origin_ds
,
3050 snap
->ds
->ds_object
, tx
);
3051 VERIFY0(zap_add_int(dp
->dp_meta_objset
,
3052 dsl_dataset_phys(origin_ds
)->ds_next_clones_obj
,
3057 dmu_buf_will_dirty(dd
->dd_dbuf
, tx
);
3058 ASSERT3U(dsl_dir_phys(dd
)->dd_origin_obj
, ==, origin_ds
->ds_object
);
3059 dsl_dir_phys(dd
)->dd_origin_obj
= dsl_dir_phys(odd
)->dd_origin_obj
;
3060 dd
->dd_origin_txg
= origin_head
->ds_dir
->dd_origin_txg
;
3061 dmu_buf_will_dirty(odd
->dd_dbuf
, tx
);
3062 dsl_dir_phys(odd
)->dd_origin_obj
= origin_ds
->ds_object
;
3063 origin_head
->ds_dir
->dd_origin_txg
=
3064 dsl_dataset_phys(origin_ds
)->ds_creation_txg
;
3066 /* change dd_clone entries */
3067 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_DIR_CLONES
) {
3068 VERIFY0(zap_remove_int(dp
->dp_meta_objset
,
3069 dsl_dir_phys(odd
)->dd_clones
, hds
->ds_object
, tx
));
3070 VERIFY0(zap_add_int(dp
->dp_meta_objset
,
3071 dsl_dir_phys(ddpa
->origin_origin
->ds_dir
)->dd_clones
,
3072 hds
->ds_object
, tx
));
3074 VERIFY0(zap_remove_int(dp
->dp_meta_objset
,
3075 dsl_dir_phys(ddpa
->origin_origin
->ds_dir
)->dd_clones
,
3076 origin_head
->ds_object
, tx
));
3077 if (dsl_dir_phys(dd
)->dd_clones
== 0) {
3078 dsl_dir_phys(dd
)->dd_clones
=
3079 zap_create(dp
->dp_meta_objset
, DMU_OT_DSL_CLONES
,
3080 DMU_OT_NONE
, 0, tx
);
3082 VERIFY0(zap_add_int(dp
->dp_meta_objset
,
3083 dsl_dir_phys(dd
)->dd_clones
, origin_head
->ds_object
, tx
));
3086 /* move snapshots to this dir */
3087 for (snap
= list_head(&ddpa
->shared_snaps
); snap
;
3088 snap
= list_next(&ddpa
->shared_snaps
, snap
)) {
3089 dsl_dataset_t
*ds
= snap
->ds
;
3092 * Property callbacks are registered to a particular
3093 * dsl_dir. Since ours is changing, evict the objset
3094 * so that they will be unregistered from the old dsl_dir.
3096 if (ds
->ds_objset
) {
3097 dmu_objset_evict(ds
->ds_objset
);
3098 ds
->ds_objset
= NULL
;
3101 /* move snap name entry */
3102 VERIFY0(dsl_dataset_get_snapname(ds
));
3103 VERIFY0(dsl_dataset_snap_remove(origin_head
,
3104 ds
->ds_snapname
, tx
, B_TRUE
));
3105 VERIFY0(zap_add(dp
->dp_meta_objset
,
3106 dsl_dataset_phys(hds
)->ds_snapnames_zapobj
, ds
->ds_snapname
,
3107 8, 1, &ds
->ds_object
, tx
));
3108 dsl_fs_ss_count_adjust(hds
->ds_dir
, 1,
3109 DD_FIELD_SNAPSHOT_COUNT
, tx
);
3111 /* change containing dsl_dir */
3112 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
3113 ASSERT3U(dsl_dataset_phys(ds
)->ds_dir_obj
, ==, odd
->dd_object
);
3114 dsl_dataset_phys(ds
)->ds_dir_obj
= dd
->dd_object
;
3115 ASSERT3P(ds
->ds_dir
, ==, odd
);
3116 dsl_dir_rele(ds
->ds_dir
, ds
);
3117 VERIFY0(dsl_dir_hold_obj(dp
, dd
->dd_object
,
3118 NULL
, ds
, &ds
->ds_dir
));
3120 /* move any clone references */
3121 if (dsl_dataset_phys(ds
)->ds_next_clones_obj
&&
3122 spa_version(dp
->dp_spa
) >= SPA_VERSION_DIR_CLONES
) {
3126 for (zap_cursor_init(&zc
, dp
->dp_meta_objset
,
3127 dsl_dataset_phys(ds
)->ds_next_clones_obj
);
3128 zap_cursor_retrieve(&zc
, &za
) == 0;
3129 zap_cursor_advance(&zc
)) {
3130 dsl_dataset_t
*cnds
;
3133 if (za
.za_first_integer
== oldnext_obj
) {
3135 * We've already moved the
3136 * origin's reference.
3141 VERIFY0(dsl_dataset_hold_obj(dp
,
3142 za
.za_first_integer
, FTAG
, &cnds
));
3143 o
= dsl_dir_phys(cnds
->ds_dir
)->
3144 dd_head_dataset_obj
;
3146 VERIFY0(zap_remove_int(dp
->dp_meta_objset
,
3147 dsl_dir_phys(odd
)->dd_clones
, o
, tx
));
3148 VERIFY0(zap_add_int(dp
->dp_meta_objset
,
3149 dsl_dir_phys(dd
)->dd_clones
, o
, tx
));
3150 dsl_dataset_rele(cnds
, FTAG
);
3152 zap_cursor_fini(&zc
);
3155 ASSERT(!dsl_prop_hascb(ds
));
3159 * Change space accounting.
3160 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
3161 * both be valid, or both be 0 (resulting in delta == 0). This
3162 * is true for each of {clone,origin} independently.
3165 delta
= ddpa
->cloneusedsnap
-
3166 dsl_dir_phys(dd
)->dd_used_breakdown
[DD_USED_SNAP
];
3167 ASSERT3S(delta
, >=, 0);
3168 ASSERT3U(ddpa
->used
, >=, delta
);
3169 dsl_dir_diduse_space(dd
, DD_USED_SNAP
, delta
, 0, 0, tx
);
3170 dsl_dir_diduse_space(dd
, DD_USED_HEAD
,
3171 ddpa
->used
- delta
, ddpa
->comp
, ddpa
->uncomp
, tx
);
3173 delta
= ddpa
->originusedsnap
-
3174 dsl_dir_phys(odd
)->dd_used_breakdown
[DD_USED_SNAP
];
3175 ASSERT3S(delta
, <=, 0);
3176 ASSERT3U(ddpa
->used
, >=, -delta
);
3177 dsl_dir_diduse_space(odd
, DD_USED_SNAP
, delta
, 0, 0, tx
);
3178 dsl_dir_diduse_space(odd
, DD_USED_HEAD
,
3179 -ddpa
->used
- delta
, -ddpa
->comp
, -ddpa
->uncomp
, tx
);
3181 dsl_dataset_phys(origin_ds
)->ds_unique_bytes
= ddpa
->unique
;
3183 /* log history record */
3184 spa_history_log_internal_ds(hds
, "promote", tx
, "");
3186 dsl_dir_rele(odd
, FTAG
);
3187 promote_rele(ddpa
, FTAG
);

/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp,
    uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
{
	uint64_t obj = last_obj;

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
		ASSERT(err != ENOENT);
		if (err != 0)
			return (err);

		if (first_obj == 0)
			first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;

		snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}

	return (0);
}

static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}

static void
snaplist_destroy(list_t *l, void *tag)
{
	struct promotenode *snap;

	if (l == NULL || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		dsl_dataset_rele(snap->ds, tag);
		kmem_free(snap, sizeof (*snap));
	}
	list_destroy(l);
}

static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
{
	int error;
	dsl_dir_t *dd;
	struct promotenode *snap;

	error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
	    &ddpa->ddpa_clone);
	if (error != 0)
		return (error);
	dd = ddpa->ddpa_clone->ds_dir;

	if (ddpa->ddpa_clone->ds_is_snapshot ||
	    !dsl_dir_is_clone(dd)) {
		dsl_dataset_rele(ddpa->ddpa_clone, tag);
		return (SET_ERROR(EINVAL));
	}

	error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
	    &ddpa->shared_snaps, tag);
	if (error != 0)
		goto out;

	error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
	    &ddpa->clone_snaps, tag);
	if (error != 0)
		goto out;

	snap = list_head(&ddpa->shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
	error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
	    dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
	    &ddpa->origin_snaps, tag);
	if (error != 0)
		goto out;

	if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
		error = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
		    tag, &ddpa->origin_origin);
		if (error != 0)
			goto out;
	}
out:
	if (error != 0)
		promote_rele(ddpa, tag);
	return (error);
}

static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
{
	snaplist_destroy(&ddpa->shared_snaps, tag);
	snaplist_destroy(&ddpa->clone_snaps, tag);
	snaplist_destroy(&ddpa->origin_snaps, tag);
	if (ddpa->origin_origin != NULL)
		dsl_dataset_rele(ddpa->origin_origin, tag);
	dsl_dataset_rele(ddpa->ddpa_clone, tag);
}

/*
 * Promote a clone.
 *
 * If it fails due to a conflicting snapshot name, "conflsnap" will be filled
 * in with the name.  (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes
 * long.)
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_promote_arg_t ddpa = { 0 };
	uint64_t numsnaps;
	int error;
	nvpair_t *snap_pair;
	objset_t *os;

	/*
	 * We will modify space proportional to the number of
	 * snapshots.  Compute numsnaps.
	 */
	error = dmu_objset_hold(name, FTAG, &os);
	if (error != 0)
		return (error);
	error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
	    dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
	    &numsnaps);
	dmu_objset_rele(os, FTAG);
	if (error != 0)
		return (error);

	ddpa.ddpa_clonename = name;
	ddpa.err_ds = fnvlist_alloc();
	ddpa.cr = CRED();

	error = dsl_sync_task(name, dsl_dataset_promote_check,
	    dsl_dataset_promote_sync, &ddpa,
	    2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);

	/*
	 * Return the first conflicting snapshot found.
	 */
	snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
	if (snap_pair != NULL && conflsnap != NULL)
		(void) strcpy(conflsnap, nvpair_name(snap_pair));

	fnvlist_free(ddpa.err_ds);

	return (error);
}

int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
	/*
	 * "slack" factor for received datasets with refquota set on them.
	 * See the bottom of this function for details on its use.
	 */
	uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
	    spa_asize_inflation;
	int64_t unused_refres_delta;

	/* they should both be heads */
	if (clone->ds_is_snapshot ||
	    origin_head->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* if we are not forcing, the branch point should be just before them */
	if (!force && clone->ds_prev != origin_head->ds_prev)
		return (SET_ERROR(EINVAL));

	/* clone should be the clone (unless they are unrelated) */
	if (clone->ds_prev != NULL &&
	    clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
	    origin_head->ds_dir != clone->ds_prev->ds_dir)
		return (SET_ERROR(EINVAL));

	/* the clone should be a child of the origin */
	if (clone->ds_dir->dd_parent != origin_head->ds_dir)
		return (SET_ERROR(EINVAL));

	/* origin_head shouldn't be modified unless 'force' */
	if (!force &&
	    dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
		return (SET_ERROR(ETXTBSY));

	/* origin_head should have no long holds (e.g. is not mounted) */
	if (dsl_dataset_handoff_check(origin_head, owner, tx))
		return (SET_ERROR(EBUSY));

	/* check amount of any unconsumed refreservation */
	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
		return (SET_ERROR(ENOSPC));

	/*
	 * The clone can't be too much over the head's refquota.
	 *
	 * To ensure that the entire refquota can be used, we allow one
	 * transaction to exceed the refquota.  Therefore, this check
	 * needs to also allow for the space referenced to be more than the
	 * refquota.  The maximum amount of space that one transaction can use
	 * on disk is DMU_MAX_ACCESS * spa_asize_inflation.  Allowing this
	 * overage ensures that we are able to receive a filesystem that
	 * exceeds the refquota on the source system.
	 *
	 * So that overage is the refquota_slack we use below.
	 */
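	/*
	 * Illustrative numbers only: assuming DMU_MAX_ACCESS is 64MB and
	 * spa_asize_inflation is at its default of 24, refquota_slack would
	 * be roughly 1.5GB, so a received clone referencing up to
	 * refquota + ~1.5GB still passes the check below.
	 */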
	if (origin_head->ds_quota != 0 &&
	    dsl_dataset_phys(clone)->ds_referenced_bytes >
	    origin_head->ds_quota + refquota_slack)
		return (SET_ERROR(EDQUOT));

	return (0);
}

static void
dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
    dsl_dataset_t *origin, dmu_tx_t *tx)
{
	uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	ASSERT(dsl_pool_sync_context(dp));

	clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
	origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);

	if (clone_remap_dl_obj != 0) {
		dsl_deadlist_close(&clone->ds_remap_deadlist);
		dsl_dataset_unset_remap_deadlist_object(clone, tx);
	}
	if (origin_remap_dl_obj != 0) {
		dsl_deadlist_close(&origin->ds_remap_deadlist);
		dsl_dataset_unset_remap_deadlist_object(origin, tx);
	}

	if (clone_remap_dl_obj != 0) {
		dsl_dataset_set_remap_deadlist_object(origin,
		    clone_remap_dl_obj, tx);
		dsl_deadlist_open(&origin->ds_remap_deadlist,
		    dp->dp_meta_objset, clone_remap_dl_obj);
	}
	if (origin_remap_dl_obj != 0) {
		dsl_dataset_set_remap_deadlist_object(clone,
		    origin_remap_dl_obj, tx);
		dsl_deadlist_open(&clone->ds_remap_deadlist,
		    dp->dp_meta_objset, origin_remap_dl_obj);
	}
}

void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int64_t unused_refres_delta;

	ASSERT(clone->ds_reserved == 0);
	/*
	 * NOTE: On DEBUG kernels there could be a race between this and
	 * the check function if spa_asize_inflation is adjusted...
	 */
	ASSERT(origin_head->ds_quota == 0 ||
	    dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
	    DMU_MAX_ACCESS * spa_asize_inflation);
	ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);

	/*
	 * Swap per-dataset feature flags.
	 */
	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (!(spa_feature_table[f].fi_flags &
		    ZFEATURE_FLAG_PER_DATASET)) {
			ASSERT(!clone->ds_feature_inuse[f]);
			ASSERT(!origin_head->ds_feature_inuse[f]);
			continue;
		}

		boolean_t clone_inuse = clone->ds_feature_inuse[f];
		boolean_t origin_head_inuse = origin_head->ds_feature_inuse[f];

		if (clone_inuse) {
			dsl_dataset_deactivate_feature(clone->ds_object, f, tx);
			clone->ds_feature_inuse[f] = B_FALSE;
		}
		if (origin_head_inuse) {
			dsl_dataset_deactivate_feature(origin_head->ds_object,
			    f, tx);
			origin_head->ds_feature_inuse[f] = B_FALSE;
		}
		if (clone_inuse) {
			dsl_dataset_activate_feature(origin_head->ds_object,
			    f, tx);
			origin_head->ds_feature_inuse[f] = B_TRUE;
		}
		if (origin_head_inuse) {
			dsl_dataset_activate_feature(clone->ds_object, f, tx);
			clone->ds_feature_inuse[f] = B_TRUE;
		}
	}

	dmu_buf_will_dirty(clone->ds_dbuf, tx);
	dmu_buf_will_dirty(origin_head->ds_dbuf, tx);

	if (clone->ds_objset != NULL) {
		dmu_objset_evict(clone->ds_objset);
		clone->ds_objset = NULL;
	}

	if (origin_head->ds_objset != NULL) {
		dmu_objset_evict(origin_head->ds_objset);
		origin_head->ds_objset = NULL;
	}

	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (clone->ds_prev) {
		dsl_dataset_t *origin = clone->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
		    &dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;

		rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
		rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);

		tmp = dsl_dataset_phys(origin_head)->ds_bp;
		dsl_dataset_phys(origin_head)->ds_bp =
		    dsl_dataset_phys(clone)->ds_bp;
		dsl_dataset_phys(clone)->ds_bp = tmp;
		rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
		rrw_exit(&clone->ds_bp_rwlock, FTAG);
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(dsl_dir_phys(clone->ds_dir)->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&clone->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&origin_head->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
		    cdl_used -
		    (dsl_dataset_phys(origin_head)->ds_referenced_bytes +
		    odl_used);
		dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
		    cdl_comp -
		    (dsl_dataset_phys(origin_head)->ds_compressed_bytes +
		    odl_comp);
		duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
		    odl_uncomp);

		dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&origin_head->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
	    dsl_dataset_phys(clone)->ds_referenced_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
	    dsl_dataset_phys(clone)->ds_compressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
	    dsl_dataset_phys(clone)->ds_uncompressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
	    unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&clone->ds_deadlist);
	dsl_deadlist_close(&origin_head->ds_deadlist);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(origin_head)->ds_deadlist_obj);
	dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);

	dsl_scan_ds_clone_swapped(origin_head, clone, tx);

	spa_history_log_internal_ds(clone, "clone swap", tx,
	    "parent=%s", origin_head->ds_dir->dd_myname);
}

/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(pname, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
	if (error == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_pool_rele(dp, FTAG);

	return (error);
}

int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*used -=
		    (ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
	    ds->ds_quota) {
		if (inflight > 0 ||
		    dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
			error = SET_ERROR(ERESTART);
		else
			error = SET_ERROR(EDQUOT);
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}

typedef struct dsl_dataset_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;

/* ARGSUSED */
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
	    newval < ds->ds_reserved) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds = NULL;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	dsl_prop_set_sync_impl(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
	    &ddsqra->ddsqra_value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));

	if (ds->ds_quota != newval) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = newval;
	}
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
    uint64_t refquota)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refquota;

	return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
	    dsl_dataset_set_refquota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}

static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval, unique;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, newval) -
		    MAX(unique, ds->ds_reserved);

		if (delta >
		    dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
		    (ds->ds_quota > 0 && newval > ds->ds_quota)) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ENOSPC));
		}
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
    zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
	uint64_t newval;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    source, sizeof (value), 1, &value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	delta = MAX(0, (int64_t)(newval - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = newval;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}

static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds = NULL;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
	dsl_dataset_set_refreservation_sync_impl(ds,
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
    uint64_t refreservation)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refreservation;

	return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
	    dsl_dataset_set_refreservation_sync, &ddsqra,
	    0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Return (in *usedp) the amount of space written in new that is not
 * present in oldsnap.  New may be a snapshot or the head.  Old must be
 * a snapshot before new, in new's filesystem (or its origin).  If not then
 * fail and return EINVAL.
 *
 * The written space is calculated by considering two components: First, we
 * ignore any freed space, and calculate the written as new's used space
 * minus old's used space.  Next, we add in the amount of space that was freed
 * between the two snapshots, thus reducing new's used space relative to old's.
 * Specifically, this is the space that was born before old->ds_creation_txg,
 * and freed before new (ie. on new's deadlist or a previous deadlist).
 *
 * space freed                         [---------------------]
 * snapshots                       ---O-------O--------O-------O------
 *                                         oldsnap            new
 */
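/*
 * Illustrative example (made-up numbers): if oldsnap referenced 10G, new
 * references 12G, and 1G of data that existed before oldsnap was freed
 * between the two, then "written" comes out to 12G - 10G + 1G = 3G.
 */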
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = new->ds_dir->dd_pool;

	ASSERT(dsl_pool_config_held(dp));

	*usedp = 0;
	*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
	*usedp -= dsl_dataset_phys(oldsnap)->ds_referenced_bytes;

	*compp = 0;
	*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
	*compp -= dsl_dataset_phys(oldsnap)->ds_compressed_bytes;

	*uncompp = 0;
	*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
	*uncompp -= dsl_dataset_phys(oldsnap)->ds_uncompressed_bytes;

	snapobj = new->ds_object;
	while (snapobj != oldsnap->ds_object) {
		dsl_dataset_t *snap;
		uint64_t used, comp, uncomp;

		if (snapobj == new->ds_object) {
			snap = new;
		} else {
			err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
			if (err != 0)
				break;
		}

		if (dsl_dataset_phys(snap)->ds_prev_snap_txg ==
		    dsl_dataset_phys(oldsnap)->ds_creation_txg) {
			/*
			 * The blocks in the deadlist can not be born after
			 * ds_prev_snap_txg, so get the whole deadlist space,
			 * which is more efficient (especially for old-format
			 * deadlists).  Unfortunately the deadlist code
			 * doesn't have enough information to make this
			 * optimization itself.
			 */
			dsl_deadlist_space(&snap->ds_deadlist,
			    &used, &comp, &uncomp);
		} else {
			dsl_deadlist_space_range(&snap->ds_deadlist,
			    0, dsl_dataset_phys(oldsnap)->ds_creation_txg,
			    &used, &comp, &uncomp);
		}
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		/*
		 * If we get to the beginning of the chain of snapshots
		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
		 * was not a snapshot of/before new.
		 */
		snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
		if (snap != new)
			dsl_dataset_rele(snap, FTAG);
		if (snapobj == 0) {
			err = SET_ERROR(EINVAL);
			break;
		}
	}
	return (err);
}

/*
 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
 * lastsnap, and all snapshots in between are deleted.
 *
 * blocks that would be freed            [---------------------------]
 * snapshots                       ---O-------O--------O-------O--------O
 *                                        firstsnap        lastsnap
 *
 * This is the set of blocks that were born after the snap before firstsnap,
 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
 * We calculate this by iterating over the relevant deadlists (from the snap
 * after lastsnap, backward to the snap after firstsnap), summing up the
 * space on the deadlist that was born after the snap before firstsnap.
 */
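/*
 * Illustrative example (made-up numbers): deleting firstsnap..lastsnap
 * where the deadlists visited by the walk below hold 1G, 2G and 500M of
 * blocks born after firstsnap's previous snapshot would report roughly
 * 3.5G as reclaimable.
 */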
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
    dsl_dataset_t *lastsnap,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;

	ASSERT(firstsnap->ds_is_snapshot);
	ASSERT(lastsnap->ds_is_snapshot);

	/*
	 * Check that the snapshots are in the same dsl_dir, and firstsnap
	 * is before lastsnap.
	 */
	if (firstsnap->ds_dir != lastsnap->ds_dir ||
	    dsl_dataset_phys(firstsnap)->ds_creation_txg >
	    dsl_dataset_phys(lastsnap)->ds_creation_txg)
		return (SET_ERROR(EINVAL));

	*usedp = *compp = *uncompp = 0;

	snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
	while (snapobj != firstsnap->ds_object) {
		dsl_dataset_t *ds;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
		if (err != 0)
			break;

		dsl_deadlist_space_range(&ds->ds_deadlist,
		    dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		ASSERT3U(snapobj, !=, 0);
		dsl_dataset_rele(ds, FTAG);
	}
	return (err);
}

/*
 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
 * For example, they could both be snapshots of the same filesystem, and
 * 'earlier' is before 'later'.  Or 'earlier' could be the origin of
 * 'later's filesystem.  Or 'earlier' could be an older snapshot in the
 * origin's filesystem.  Or 'earlier' could be the origin's origin.
 *
 * If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
 */
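/*
 * Illustrative example (hypothetical datasets): with pool/fs@a cloned to
 * pool/clone and pool/clone@b taken later, this returns B_TRUE for
 * (later = pool/clone@b, earlier = pool/fs@a) because @a is the clone's
 * origin, but B_FALSE in the reverse direction.
 */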
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
    uint64_t earlier_txg)
{
	dsl_pool_t *dp = later->ds_dir->dd_pool;
	int error;
	boolean_t ret;

	ASSERT(dsl_pool_config_held(dp));
	ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);

	if (earlier_txg == 0)
		earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;

	if (later->ds_is_snapshot &&
	    earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
		return (B_FALSE);

	if (later->ds_dir == earlier->ds_dir)
		return (B_TRUE);
	if (!dsl_dir_is_clone(later->ds_dir))
		return (B_FALSE);

	if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object)
		return (B_TRUE);
	dsl_dataset_t *origin;
	error = dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
	if (error != 0)
		return (B_FALSE);
	ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
	dsl_dataset_rele(origin, FTAG);
	return (ret);
}
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}

boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
	dmu_object_info_t doi;

	dmu_object_info_from_db(ds->ds_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}

boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
	return (dsl_dataset_is_zapified(ds) &&
	    zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}
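/*
 * Pattern sketch (illustrative; not part of the original source): the
 * accessors below follow the same recipe for storing optional per-dataset
 * state.  A hypothetical writer of a new field, say DS_FIELD_EXAMPLE, would
 * zapify the dataset object and then use the ordinary zap_* routines
 * against the MOS:
 *
 *	dsl_dataset_zapify(ds, tx);
 *	VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
 *	    DS_FIELD_EXAMPLE, sizeof (value), 1, &value, tx));
 *
 * Readers must call dsl_dataset_is_zapified() first, because datasets
 * created before zapification have a plain DMU_OT_DSL_DATASET object with
 * no ZAP fields at all.
 */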
uint64_t
dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
{
	uint64_t remap_deadlist_obj;
	int err;

	if (!dsl_dataset_is_zapified(ds))
		return (0);

	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
	    DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
	    &remap_deadlist_obj);

	if (err != 0) {
		VERIFY3S(err, ==, ENOENT);
		return (0);
	}

	ASSERT(remap_deadlist_obj != 0);
	return (remap_deadlist_obj);
}
boolean_t
dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
{
	EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
	    dsl_dataset_get_remap_deadlist_object(ds) != 0);
	return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
}
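/*
 * Usage sketch (illustrative; not part of the original source): callers in
 * sync context typically test for existence once and then operate on the
 * already-open in-core deadlist rather than re-reading the ZAP field:
 *
 *	if (dsl_dataset_remap_deadlist_exists(ds))
 *		dsl_deadlist_insert(&ds->ds_remap_deadlist, bp, tx);
 */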
static void
dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
    dmu_tx_t *tx)
{
	ASSERT(obj != 0);
	dsl_dataset_zapify(ds, tx);
	VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
	    DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
}
static void
dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
}
void
dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t remap_deadlist_object;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dsl_dataset_remap_deadlist_exists(ds));

	remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
	dsl_deadlist_close(&ds->ds_remap_deadlist);
	dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
	dsl_dataset_unset_remap_deadlist_object(ds, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t remap_deadlist_obj;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
	/*
	 * Currently we only create remap deadlists when there are indirect
	 * vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	remap_deadlist_obj = dsl_deadlist_clone(
	    &ds->ds_deadlist, UINT64_MAX,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
	dsl_dataset_set_remap_deadlist_object(ds,
	    remap_deadlist_obj, tx);
	dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
	    remap_deadlist_obj);
	spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
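/*
 * Lifecycle sketch (illustrative; not part of the original source): the
 * device-removal code creates the remap deadlist lazily from sync context,
 * mirroring the ASSERTs above, roughly as:
 *
 *	mutex_enter(&ds->ds_remap_deadlist_lock);
 *	if (!dsl_dataset_remap_deadlist_exists(ds))
 *		dsl_dataset_create_remap_deadlist(ds, tx);
 *	mutex_exit(&ds->ds_remap_deadlist_lock);
 *
 * and tears it down again with dsl_dataset_destroy_remap_deadlist(), which
 * frees the deadlist entries and drops the SPA_FEATURE_OBSOLETE_COUNTS
 * reference taken here.
 */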
#if defined(_KERNEL) && defined(HAVE_SPL)
#if defined(_LP64)
module_param(zfs_max_recordsize, int, 0644);
MODULE_PARM_DESC(zfs_max_recordsize, "Max allowed record size");
#else
/* Limited to 1M on 32-bit platforms due to lack of virtual address space */
module_param(zfs_max_recordsize, int, 0444);
MODULE_PARM_DESC(zfs_max_recordsize, "Max allowed record size");
#endif
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_flags);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_rele_flags);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);
#endif
);