4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
17 * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
18 * Copyright 2017 Nexenta Systems, Inc.
21 #include <sys/zfs_context.h>
22 #include <sys/dsl_dataset.h>
23 #include <sys/dsl_dir.h>
24 #include <sys/dsl_prop.h>
25 #include <sys/dsl_synctask.h>
26 #include <sys/dsl_destroy.h>
27 #include <sys/dmu_impl.h>
28 #include <sys/dmu_tx.h>
31 #include <sys/zfeature.h>
33 #include <sys/dsl_bookmark.h>
34 #include <zfs_namecheck.h>
35 #include <sys/dmu_send.h>
38 dsl_bookmark_hold_ds(dsl_pool_t
*dp
, const char *fullname
,
39 dsl_dataset_t
**dsp
, void *tag
, char **shortnamep
)
41 char buf
[ZFS_MAX_DATASET_NAME_LEN
];
44 if (strlen(fullname
) >= ZFS_MAX_DATASET_NAME_LEN
)
45 return (SET_ERROR(ENAMETOOLONG
));
46 hashp
= strchr(fullname
, '#');
48 return (SET_ERROR(EINVAL
));
50 *shortnamep
= hashp
+ 1;
51 if (zfs_component_namecheck(*shortnamep
, NULL
, NULL
))
52 return (SET_ERROR(EINVAL
));
53 (void) strlcpy(buf
, fullname
, hashp
- fullname
+ 1);
54 return (dsl_dataset_hold(dp
, buf
, tag
, dsp
));
58 * Returns ESRCH if bookmark is not found.
59 * Note, we need to use the ZAP rather than the AVL to look up bookmarks
60 * by name, because only the ZAP honors the casesensitivity setting.
63 dsl_bookmark_lookup_impl(dsl_dataset_t
*ds
, const char *shortname
,
64 zfs_bookmark_phys_t
*bmark_phys
)
66 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
67 uint64_t bmark_zapobj
= ds
->ds_bookmarks_obj
;
71 if (bmark_zapobj
== 0)
72 return (SET_ERROR(ESRCH
));
74 if (dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_CI_DATASET
)
78 * Zero out the bookmark in case the one stored on disk
79 * is in an older, shorter format.
81 bzero(bmark_phys
, sizeof (*bmark_phys
));
83 err
= zap_lookup_norm(mos
, bmark_zapobj
, shortname
, sizeof (uint64_t),
84 sizeof (*bmark_phys
) / sizeof (uint64_t), bmark_phys
, mt
, NULL
, 0,
87 return (err
== ENOENT
? ESRCH
: err
);
91 * If later_ds is non-NULL, this will return EXDEV if the the specified bookmark
92 * does not represents an earlier point in later_ds's timeline. However,
93 * bmp will still be filled in if we return EXDEV.
95 * Returns ENOENT if the dataset containing the bookmark does not exist.
96 * Returns ESRCH if the dataset exists but the bookmark was not found in it.
99 dsl_bookmark_lookup(dsl_pool_t
*dp
, const char *fullname
,
100 dsl_dataset_t
*later_ds
, zfs_bookmark_phys_t
*bmp
)
106 error
= dsl_bookmark_hold_ds(dp
, fullname
, &ds
, FTAG
, &shortname
);
110 error
= dsl_bookmark_lookup_impl(ds
, shortname
, bmp
);
111 if (error
== 0 && later_ds
!= NULL
) {
112 if (!dsl_dataset_is_before(later_ds
, ds
, bmp
->zbm_creation_txg
))
113 error
= SET_ERROR(EXDEV
);
115 dsl_dataset_rele(ds
, FTAG
);
119 typedef struct dsl_bookmark_create_redacted_arg
{
120 const char *dbcra_bmark
;
121 const char *dbcra_snap
;
122 redaction_list_t
**dbcra_rl
;
123 uint64_t dbcra_numsnaps
;
124 uint64_t *dbcra_snaps
;
126 } dsl_bookmark_create_redacted_arg_t
;
128 typedef struct dsl_bookmark_create_arg
{
129 nvlist_t
*dbca_bmarks
;
130 nvlist_t
*dbca_errors
;
131 } dsl_bookmark_create_arg_t
;
134 dsl_bookmark_create_check_impl(dsl_dataset_t
*snapds
, const char *bookmark_name
,
137 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
138 dsl_dataset_t
*bmark_fs
;
141 zfs_bookmark_phys_t bmark_phys
= { 0 };
143 if (!snapds
->ds_is_snapshot
)
144 return (SET_ERROR(EINVAL
));
146 error
= dsl_bookmark_hold_ds(dp
, bookmark_name
,
147 &bmark_fs
, FTAG
, &shortname
);
151 if (!dsl_dataset_is_before(bmark_fs
, snapds
, 0)) {
152 dsl_dataset_rele(bmark_fs
, FTAG
);
153 return (SET_ERROR(EINVAL
));
156 error
= dsl_bookmark_lookup_impl(bmark_fs
, shortname
,
158 dsl_dataset_rele(bmark_fs
, FTAG
);
160 return (SET_ERROR(EEXIST
));
167 dsl_bookmark_create_check(void *arg
, dmu_tx_t
*tx
)
169 dsl_bookmark_create_arg_t
*dbca
= arg
;
170 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
173 if (!spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_BOOKMARKS
))
174 return (SET_ERROR(ENOTSUP
));
176 for (nvpair_t
*pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, NULL
);
177 pair
!= NULL
; pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, pair
)) {
178 dsl_dataset_t
*snapds
;
181 /* note: validity of nvlist checked by ioctl layer */
182 error
= dsl_dataset_hold(dp
, fnvpair_value_string(pair
),
185 error
= dsl_bookmark_create_check_impl(snapds
,
186 nvpair_name(pair
), tx
);
187 dsl_dataset_rele(snapds
, FTAG
);
190 fnvlist_add_int32(dbca
->dbca_errors
,
191 nvpair_name(pair
), error
);
199 static dsl_bookmark_node_t
*
200 dsl_bookmark_node_alloc(char *shortname
)
202 dsl_bookmark_node_t
*dbn
= kmem_alloc(sizeof (*dbn
), KM_SLEEP
);
203 dbn
->dbn_name
= spa_strdup(shortname
);
204 dbn
->dbn_dirty
= B_FALSE
;
205 mutex_init(&dbn
->dbn_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
210 * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
213 dsl_bookmark_set_phys(zfs_bookmark_phys_t
*zbm
, dsl_dataset_t
*snap
)
215 spa_t
*spa
= dsl_dataset_get_spa(snap
);
216 objset_t
*mos
= spa_get_dsl(spa
)->dp_meta_objset
;
217 dsl_dataset_phys_t
*dsp
= dsl_dataset_phys(snap
);
218 zbm
->zbm_guid
= dsp
->ds_guid
;
219 zbm
->zbm_creation_txg
= dsp
->ds_creation_txg
;
220 zbm
->zbm_creation_time
= dsp
->ds_creation_time
;
221 zbm
->zbm_redaction_obj
= 0;
224 * If the dataset is encrypted create a larger bookmark to
225 * accommodate the IVset guid. The IVset guid was added
226 * after the encryption feature to prevent a problem with
227 * raw sends. If we encounter an encrypted dataset without
228 * an IVset guid we fall back to a normal bookmark.
230 if (snap
->ds_dir
->dd_crypto_obj
!= 0 &&
231 spa_feature_is_enabled(spa
, SPA_FEATURE_BOOKMARK_V2
)) {
232 (void) zap_lookup(mos
, snap
->ds_object
,
233 DS_FIELD_IVSET_GUID
, sizeof (uint64_t), 1,
234 &zbm
->zbm_ivset_guid
);
237 if (spa_feature_is_enabled(spa
, SPA_FEATURE_BOOKMARK_WRITTEN
)) {
238 zbm
->zbm_flags
= ZBM_FLAG_SNAPSHOT_EXISTS
| ZBM_FLAG_HAS_FBN
;
239 zbm
->zbm_referenced_bytes_refd
= dsp
->ds_referenced_bytes
;
240 zbm
->zbm_compressed_bytes_refd
= dsp
->ds_compressed_bytes
;
241 zbm
->zbm_uncompressed_bytes_refd
= dsp
->ds_uncompressed_bytes
;
243 dsl_dataset_t
*nextds
;
244 VERIFY0(dsl_dataset_hold_obj(snap
->ds_dir
->dd_pool
,
245 dsp
->ds_next_snap_obj
, FTAG
, &nextds
));
246 dsl_deadlist_space(&nextds
->ds_deadlist
,
247 &zbm
->zbm_referenced_freed_before_next_snap
,
248 &zbm
->zbm_compressed_freed_before_next_snap
,
249 &zbm
->zbm_uncompressed_freed_before_next_snap
);
250 dsl_dataset_rele(nextds
, FTAG
);
252 bzero(&zbm
->zbm_flags
,
253 sizeof (zfs_bookmark_phys_t
) -
254 offsetof(zfs_bookmark_phys_t
, zbm_flags
));
259 dsl_bookmark_node_add(dsl_dataset_t
*hds
, dsl_bookmark_node_t
*dbn
,
262 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
263 objset_t
*mos
= dp
->dp_meta_objset
;
265 if (hds
->ds_bookmarks_obj
== 0) {
266 hds
->ds_bookmarks_obj
= zap_create_norm(mos
,
267 U8_TEXTPREP_TOUPPER
, DMU_OTN_ZAP_METADATA
, DMU_OT_NONE
, 0,
269 spa_feature_incr(dp
->dp_spa
, SPA_FEATURE_BOOKMARKS
, tx
);
271 dsl_dataset_zapify(hds
, tx
);
272 VERIFY0(zap_add(mos
, hds
->ds_object
,
273 DS_FIELD_BOOKMARK_NAMES
,
274 sizeof (hds
->ds_bookmarks_obj
), 1,
275 &hds
->ds_bookmarks_obj
, tx
));
278 avl_add(&hds
->ds_bookmarks
, dbn
);
281 * To maintain backwards compatibility with software that doesn't
282 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
283 * possible bookmark size.
285 uint64_t bookmark_phys_size
= BOOKMARK_PHYS_SIZE_V1
;
286 if (spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_BOOKMARK_V2
) &&
287 (dbn
->dbn_phys
.zbm_ivset_guid
!= 0 || dbn
->dbn_phys
.zbm_flags
&
288 ZBM_FLAG_HAS_FBN
|| dbn
->dbn_phys
.zbm_redaction_obj
!= 0)) {
289 bookmark_phys_size
= BOOKMARK_PHYS_SIZE_V2
;
290 spa_feature_incr(dp
->dp_spa
, SPA_FEATURE_BOOKMARK_V2
, tx
);
293 __attribute__((unused
)) zfs_bookmark_phys_t zero_phys
= { 0 };
294 ASSERT0(bcmp(((char *)&dbn
->dbn_phys
) + bookmark_phys_size
,
295 &zero_phys
, sizeof (zfs_bookmark_phys_t
) - bookmark_phys_size
));
297 VERIFY0(zap_add(mos
, hds
->ds_bookmarks_obj
, dbn
->dbn_name
,
298 sizeof (uint64_t), bookmark_phys_size
/ sizeof (uint64_t),
299 &dbn
->dbn_phys
, tx
));
303 * If redaction_list is non-null, we create a redacted bookmark and redaction
304 * list, and store the object number of the redaction list in redact_obj.
307 dsl_bookmark_create_sync_impl(const char *bookmark
, const char *snapshot
,
308 dmu_tx_t
*tx
, uint64_t num_redact_snaps
, uint64_t *redact_snaps
, void *tag
,
309 redaction_list_t
**redaction_list
)
311 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
312 objset_t
*mos
= dp
->dp_meta_objset
;
313 dsl_dataset_t
*snapds
, *bmark_fs
;
315 boolean_t bookmark_redacted
;
316 uint64_t *dsredactsnaps
;
319 VERIFY0(dsl_dataset_hold(dp
, snapshot
, FTAG
, &snapds
));
320 VERIFY0(dsl_bookmark_hold_ds(dp
, bookmark
, &bmark_fs
, FTAG
,
323 dsl_bookmark_node_t
*dbn
= dsl_bookmark_node_alloc(shortname
);
324 dsl_bookmark_set_phys(&dbn
->dbn_phys
, snapds
);
326 bookmark_redacted
= dsl_dataset_get_uint64_array_feature(snapds
,
327 SPA_FEATURE_REDACTED_DATASETS
, &dsnumsnaps
, &dsredactsnaps
);
328 if (redaction_list
!= NULL
|| bookmark_redacted
) {
329 redaction_list_t
*local_rl
;
330 if (bookmark_redacted
) {
331 redact_snaps
= dsredactsnaps
;
332 num_redact_snaps
= dsnumsnaps
;
334 dbn
->dbn_phys
.zbm_redaction_obj
= dmu_object_alloc(mos
,
335 DMU_OTN_UINT64_METADATA
, SPA_OLD_MAXBLOCKSIZE
,
336 DMU_OTN_UINT64_METADATA
, sizeof (redaction_list_phys_t
) +
337 num_redact_snaps
* sizeof (uint64_t), tx
);
338 spa_feature_incr(dp
->dp_spa
,
339 SPA_FEATURE_REDACTION_BOOKMARKS
, tx
);
341 VERIFY0(dsl_redaction_list_hold_obj(dp
,
342 dbn
->dbn_phys
.zbm_redaction_obj
, tag
, &local_rl
));
343 dsl_redaction_list_long_hold(dp
, local_rl
, tag
);
345 ASSERT3U((local_rl
)->rl_dbuf
->db_size
, >=,
346 sizeof (redaction_list_phys_t
) + num_redact_snaps
*
348 dmu_buf_will_dirty(local_rl
->rl_dbuf
, tx
);
349 bcopy(redact_snaps
, local_rl
->rl_phys
->rlp_snaps
,
350 sizeof (uint64_t) * num_redact_snaps
);
351 local_rl
->rl_phys
->rlp_num_snaps
= num_redact_snaps
;
352 if (bookmark_redacted
) {
353 ASSERT3P(redaction_list
, ==, NULL
);
354 local_rl
->rl_phys
->rlp_last_blkid
= UINT64_MAX
;
355 local_rl
->rl_phys
->rlp_last_object
= UINT64_MAX
;
356 dsl_redaction_list_long_rele(local_rl
, tag
);
357 dsl_redaction_list_rele(local_rl
, tag
);
359 *redaction_list
= local_rl
;
363 if (dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
) {
364 spa_feature_incr(dp
->dp_spa
,
365 SPA_FEATURE_BOOKMARK_WRITTEN
, tx
);
368 dsl_bookmark_node_add(bmark_fs
, dbn
, tx
);
370 spa_history_log_internal_ds(bmark_fs
, "bookmark", tx
,
371 "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
372 shortname
, (longlong_t
)dbn
->dbn_phys
.zbm_creation_txg
,
373 (longlong_t
)snapds
->ds_object
,
374 (longlong_t
)dbn
->dbn_phys
.zbm_redaction_obj
);
376 dsl_dataset_rele(bmark_fs
, FTAG
);
377 dsl_dataset_rele(snapds
, FTAG
);
381 dsl_bookmark_create_sync(void *arg
, dmu_tx_t
*tx
)
383 dsl_bookmark_create_arg_t
*dbca
= arg
;
385 ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx
)->dp_spa
,
386 SPA_FEATURE_BOOKMARKS
));
388 for (nvpair_t
*pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, NULL
);
389 pair
!= NULL
; pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, pair
)) {
390 dsl_bookmark_create_sync_impl(nvpair_name(pair
),
391 fnvpair_value_string(pair
), tx
, 0, NULL
, NULL
, NULL
);
396 * The bookmarks must all be in the same pool.
399 dsl_bookmark_create(nvlist_t
*bmarks
, nvlist_t
*errors
)
402 dsl_bookmark_create_arg_t dbca
;
404 pair
= nvlist_next_nvpair(bmarks
, NULL
);
408 dbca
.dbca_bmarks
= bmarks
;
409 dbca
.dbca_errors
= errors
;
411 return (dsl_sync_task(nvpair_name(pair
), dsl_bookmark_create_check
,
412 dsl_bookmark_create_sync
, &dbca
,
413 fnvlist_num_pairs(bmarks
), ZFS_SPACE_CHECK_NORMAL
));
417 dsl_bookmark_create_redacted_check(void *arg
, dmu_tx_t
*tx
)
419 dsl_bookmark_create_redacted_arg_t
*dbcra
= arg
;
420 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
421 dsl_dataset_t
*snapds
;
424 if (!spa_feature_is_enabled(dp
->dp_spa
,
425 SPA_FEATURE_REDACTION_BOOKMARKS
))
426 return (SET_ERROR(ENOTSUP
));
428 * If the list of redact snaps will not fit in the bonus buffer with
429 * the furthest reached object and offset, fail.
431 if (dbcra
->dbcra_numsnaps
> (dmu_bonus_max() -
432 sizeof (redaction_list_phys_t
)) / sizeof (uint64_t))
433 return (SET_ERROR(E2BIG
));
435 rv
= dsl_dataset_hold(dp
, dbcra
->dbcra_snap
,
438 rv
= dsl_bookmark_create_check_impl(snapds
, dbcra
->dbcra_bmark
,
440 dsl_dataset_rele(snapds
, FTAG
);
446 dsl_bookmark_create_redacted_sync(void *arg
, dmu_tx_t
*tx
)
448 dsl_bookmark_create_redacted_arg_t
*dbcra
= arg
;
449 dsl_bookmark_create_sync_impl(dbcra
->dbcra_bmark
, dbcra
->dbcra_snap
, tx
,
450 dbcra
->dbcra_numsnaps
, dbcra
->dbcra_snaps
, dbcra
->dbcra_tag
,
455 dsl_bookmark_create_redacted(const char *bookmark
, const char *snapshot
,
456 uint64_t numsnaps
, uint64_t *snapguids
, void *tag
, redaction_list_t
**rl
)
458 dsl_bookmark_create_redacted_arg_t dbcra
;
460 dbcra
.dbcra_bmark
= bookmark
;
461 dbcra
.dbcra_snap
= snapshot
;
463 dbcra
.dbcra_numsnaps
= numsnaps
;
464 dbcra
.dbcra_snaps
= snapguids
;
465 dbcra
.dbcra_tag
= tag
;
467 return (dsl_sync_task(bookmark
, dsl_bookmark_create_redacted_check
,
468 dsl_bookmark_create_redacted_sync
, &dbcra
, 5,
469 ZFS_SPACE_CHECK_NORMAL
));
473 * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
474 * If 'props' is NULL, retrieves all properties.
477 dsl_bookmark_fetch_props(dsl_pool_t
*dp
, zfs_bookmark_phys_t
*bmark_phys
,
478 nvlist_t
*props
, nvlist_t
*out_props
)
480 ASSERT3P(dp
, !=, NULL
);
481 ASSERT3P(bmark_phys
, !=, NULL
);
482 ASSERT3P(out_props
, !=, NULL
);
483 ASSERT(RRW_LOCK_HELD(&dp
->dp_config_rwlock
));
485 if (props
== NULL
|| nvlist_exists(props
,
486 zfs_prop_to_name(ZFS_PROP_GUID
))) {
487 dsl_prop_nvlist_add_uint64(out_props
,
488 ZFS_PROP_GUID
, bmark_phys
->zbm_guid
);
490 if (props
== NULL
|| nvlist_exists(props
,
491 zfs_prop_to_name(ZFS_PROP_CREATETXG
))) {
492 dsl_prop_nvlist_add_uint64(out_props
,
493 ZFS_PROP_CREATETXG
, bmark_phys
->zbm_creation_txg
);
495 if (props
== NULL
|| nvlist_exists(props
,
496 zfs_prop_to_name(ZFS_PROP_CREATION
))) {
497 dsl_prop_nvlist_add_uint64(out_props
,
498 ZFS_PROP_CREATION
, bmark_phys
->zbm_creation_time
);
500 if (props
== NULL
|| nvlist_exists(props
,
501 zfs_prop_to_name(ZFS_PROP_IVSET_GUID
))) {
502 dsl_prop_nvlist_add_uint64(out_props
,
503 ZFS_PROP_IVSET_GUID
, bmark_phys
->zbm_ivset_guid
);
505 if (bmark_phys
->zbm_flags
& ZBM_FLAG_HAS_FBN
) {
506 if (props
== NULL
|| nvlist_exists(props
,
507 zfs_prop_to_name(ZFS_PROP_REFERENCED
))) {
508 dsl_prop_nvlist_add_uint64(out_props
,
510 bmark_phys
->zbm_referenced_bytes_refd
);
512 if (props
== NULL
|| nvlist_exists(props
,
513 zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED
))) {
514 dsl_prop_nvlist_add_uint64(out_props
,
515 ZFS_PROP_LOGICALREFERENCED
,
516 bmark_phys
->zbm_uncompressed_bytes_refd
);
518 if (props
== NULL
|| nvlist_exists(props
,
519 zfs_prop_to_name(ZFS_PROP_REFRATIO
))) {
521 bmark_phys
->zbm_compressed_bytes_refd
== 0 ? 100 :
522 bmark_phys
->zbm_uncompressed_bytes_refd
* 100 /
523 bmark_phys
->zbm_compressed_bytes_refd
;
524 dsl_prop_nvlist_add_uint64(out_props
,
525 ZFS_PROP_REFRATIO
, ratio
);
529 if ((props
== NULL
|| nvlist_exists(props
, "redact_snaps") ||
530 nvlist_exists(props
, "redact_complete")) &&
531 bmark_phys
->zbm_redaction_obj
!= 0) {
532 redaction_list_t
*rl
;
533 int err
= dsl_redaction_list_hold_obj(dp
,
534 bmark_phys
->zbm_redaction_obj
, FTAG
, &rl
);
536 if (nvlist_exists(props
, "redact_snaps")) {
538 nvl
= fnvlist_alloc();
539 fnvlist_add_uint64_array(nvl
, ZPROP_VALUE
,
540 rl
->rl_phys
->rlp_snaps
,
541 rl
->rl_phys
->rlp_num_snaps
);
542 fnvlist_add_nvlist(out_props
, "redact_snaps",
546 if (nvlist_exists(props
, "redact_complete")) {
548 nvl
= fnvlist_alloc();
549 fnvlist_add_boolean_value(nvl
, ZPROP_VALUE
,
550 rl
->rl_phys
->rlp_last_blkid
== UINT64_MAX
&&
551 rl
->rl_phys
->rlp_last_object
== UINT64_MAX
);
552 fnvlist_add_nvlist(out_props
, "redact_complete",
556 dsl_redaction_list_rele(rl
, FTAG
);
562 dsl_get_bookmarks_impl(dsl_dataset_t
*ds
, nvlist_t
*props
, nvlist_t
*outnvl
)
564 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
566 ASSERT(dsl_pool_config_held(dp
));
568 if (dsl_dataset_is_snapshot(ds
))
569 return (SET_ERROR(EINVAL
));
571 for (dsl_bookmark_node_t
*dbn
= avl_first(&ds
->ds_bookmarks
);
572 dbn
!= NULL
; dbn
= AVL_NEXT(&ds
->ds_bookmarks
, dbn
)) {
573 nvlist_t
*out_props
= fnvlist_alloc();
575 dsl_bookmark_fetch_props(dp
, &dbn
->dbn_phys
, props
, out_props
);
577 fnvlist_add_nvlist(outnvl
, dbn
->dbn_name
, out_props
);
578 fnvlist_free(out_props
);
584 * Comparison func for ds_bookmarks AVL tree. We sort the bookmarks by
585 * their TXG, then by their FBN-ness. The "FBN-ness" component ensures
586 * that all bookmarks at the same TXG that HAS_FBN are adjacent, which
587 * dsl_bookmark_destroy_sync_impl() depends on. Note that there may be
588 * multiple bookmarks at the same TXG (with the same FBN-ness). In this
589 * case we differentiate them by an arbitrary metric (in this case,
593 dsl_bookmark_compare(const void *l
, const void *r
)
595 const dsl_bookmark_node_t
*ldbn
= l
;
596 const dsl_bookmark_node_t
*rdbn
= r
;
598 int64_t cmp
= AVL_CMP(ldbn
->dbn_phys
.zbm_creation_txg
,
599 rdbn
->dbn_phys
.zbm_creation_txg
);
602 cmp
= AVL_CMP((ldbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
),
603 (rdbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
));
606 cmp
= strcmp(ldbn
->dbn_name
, rdbn
->dbn_name
);
607 return (AVL_ISIGN(cmp
));
611 * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
614 dsl_bookmark_init_ds(dsl_dataset_t
*ds
)
616 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
617 objset_t
*mos
= dp
->dp_meta_objset
;
619 ASSERT(!ds
->ds_is_snapshot
);
621 avl_create(&ds
->ds_bookmarks
, dsl_bookmark_compare
,
622 sizeof (dsl_bookmark_node_t
),
623 offsetof(dsl_bookmark_node_t
, dbn_node
));
625 if (!dsl_dataset_is_zapified(ds
))
628 int zaperr
= zap_lookup(mos
, ds
->ds_object
, DS_FIELD_BOOKMARK_NAMES
,
629 sizeof (ds
->ds_bookmarks_obj
), 1, &ds
->ds_bookmarks_obj
);
630 if (zaperr
== ENOENT
)
635 if (ds
->ds_bookmarks_obj
== 0)
640 zap_attribute_t attr
;
642 for (zap_cursor_init(&zc
, mos
, ds
->ds_bookmarks_obj
);
643 (err
= zap_cursor_retrieve(&zc
, &attr
)) == 0;
644 zap_cursor_advance(&zc
)) {
645 dsl_bookmark_node_t
*dbn
=
646 dsl_bookmark_node_alloc(attr
.za_name
);
648 err
= dsl_bookmark_lookup_impl(ds
,
649 dbn
->dbn_name
, &dbn
->dbn_phys
);
650 ASSERT3U(err
, !=, ENOENT
);
652 kmem_free(dbn
, sizeof (*dbn
));
655 avl_add(&ds
->ds_bookmarks
, dbn
);
657 zap_cursor_fini(&zc
);
664 dsl_bookmark_fini_ds(dsl_dataset_t
*ds
)
667 dsl_bookmark_node_t
*dbn
;
669 if (ds
->ds_is_snapshot
)
672 while ((dbn
= avl_destroy_nodes(&ds
->ds_bookmarks
, &cookie
)) != NULL
) {
673 spa_strfree(dbn
->dbn_name
);
674 mutex_destroy(&dbn
->dbn_lock
);
675 kmem_free(dbn
, sizeof (*dbn
));
677 avl_destroy(&ds
->ds_bookmarks
);
681 * Retrieve the bookmarks that exist in the specified dataset, and the
682 * requested properties of each bookmark.
684 * The "props" nvlist specifies which properties are requested.
685 * See lzc_get_bookmarks() for the list of valid properties.
688 dsl_get_bookmarks(const char *dsname
, nvlist_t
*props
, nvlist_t
*outnvl
)
694 err
= dsl_pool_hold(dsname
, FTAG
, &dp
);
697 err
= dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
);
699 dsl_pool_rele(dp
, FTAG
);
703 err
= dsl_get_bookmarks_impl(ds
, props
, outnvl
);
705 dsl_dataset_rele(ds
, FTAG
);
706 dsl_pool_rele(dp
, FTAG
);
711 * Retrieve all properties for a single bookmark in the given dataset.
714 dsl_get_bookmark_props(const char *dsname
, const char *bmname
, nvlist_t
*props
)
718 zfs_bookmark_phys_t bmark_phys
= { 0 };
721 err
= dsl_pool_hold(dsname
, FTAG
, &dp
);
724 err
= dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
);
726 dsl_pool_rele(dp
, FTAG
);
730 err
= dsl_bookmark_lookup_impl(ds
, bmname
, &bmark_phys
);
734 dsl_bookmark_fetch_props(dp
, &bmark_phys
, NULL
, props
);
736 dsl_dataset_rele(ds
, FTAG
);
737 dsl_pool_rele(dp
, FTAG
);
741 typedef struct dsl_bookmark_destroy_arg
{
742 nvlist_t
*dbda_bmarks
;
743 nvlist_t
*dbda_success
;
744 nvlist_t
*dbda_errors
;
745 } dsl_bookmark_destroy_arg_t
;
748 dsl_bookmark_destroy_sync_impl(dsl_dataset_t
*ds
, const char *name
,
751 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
752 uint64_t bmark_zapobj
= ds
->ds_bookmarks_obj
;
754 uint64_t int_size
, num_ints
;
756 * 'search' must be zeroed so that dbn_flags (which is used in
757 * dsl_bookmark_compare()) will be zeroed even if the on-disk
758 * (in ZAP) bookmark is shorter than offsetof(dbn_flags).
760 dsl_bookmark_node_t search
= { 0 };
761 char realname
[ZFS_MAX_DATASET_NAME_LEN
];
764 * Find the real name of this bookmark, which may be different
765 * from the given name if the dataset is case-insensitive. Then
766 * use the real name to find the node in the ds_bookmarks AVL tree.
769 if (dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_CI_DATASET
)
772 VERIFY0(zap_length(mos
, bmark_zapobj
, name
, &int_size
, &num_ints
));
774 ASSERT3U(int_size
, ==, sizeof (uint64_t));
776 if (num_ints
* int_size
> BOOKMARK_PHYS_SIZE_V1
) {
777 spa_feature_decr(dmu_objset_spa(mos
),
778 SPA_FEATURE_BOOKMARK_V2
, tx
);
780 VERIFY0(zap_lookup_norm(mos
, bmark_zapobj
, name
, sizeof (uint64_t),
781 num_ints
, &search
.dbn_phys
, mt
, realname
, sizeof (realname
), NULL
));
783 search
.dbn_name
= realname
;
784 dsl_bookmark_node_t
*dbn
= avl_find(&ds
->ds_bookmarks
, &search
, NULL
);
787 if (dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
) {
789 * If this bookmark HAS_FBN, and it is before the most
790 * recent snapshot, then its TXG is a key in the head's
791 * deadlist (and all clones' heads' deadlists). If this is
792 * the last thing keeping the key (i.e. there are no more
793 * bookmarks with HAS_FBN at this TXG, and there is no
794 * snapshot at this TXG), then remove the key.
796 * Note that this algorithm depends on ds_bookmarks being
797 * sorted such that all bookmarks at the same TXG with
798 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
799 * at the same TXG in between them). If this were not
800 * the case, we would need to examine *all* bookmarks
801 * at this TXG, rather than just the adjacent ones.
804 dsl_bookmark_node_t
*dbn_prev
=
805 AVL_PREV(&ds
->ds_bookmarks
, dbn
);
806 dsl_bookmark_node_t
*dbn_next
=
807 AVL_NEXT(&ds
->ds_bookmarks
, dbn
);
809 boolean_t more_bookmarks_at_this_txg
=
810 (dbn_prev
!= NULL
&& dbn_prev
->dbn_phys
.zbm_creation_txg
==
811 dbn
->dbn_phys
.zbm_creation_txg
&&
812 (dbn_prev
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
)) ||
813 (dbn_next
!= NULL
&& dbn_next
->dbn_phys
.zbm_creation_txg
==
814 dbn
->dbn_phys
.zbm_creation_txg
&&
815 (dbn_next
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
));
817 if (!(dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_SNAPSHOT_EXISTS
) &&
818 !more_bookmarks_at_this_txg
&&
819 dbn
->dbn_phys
.zbm_creation_txg
<
820 dsl_dataset_phys(ds
)->ds_prev_snap_txg
) {
821 dsl_dir_remove_clones_key(ds
->ds_dir
,
822 dbn
->dbn_phys
.zbm_creation_txg
, tx
);
823 dsl_deadlist_remove_key(&ds
->ds_deadlist
,
824 dbn
->dbn_phys
.zbm_creation_txg
, tx
);
827 spa_feature_decr(dmu_objset_spa(mos
),
828 SPA_FEATURE_BOOKMARK_WRITTEN
, tx
);
831 if (dbn
->dbn_phys
.zbm_redaction_obj
!= 0) {
832 VERIFY0(dmu_object_free(mos
,
833 dbn
->dbn_phys
.zbm_redaction_obj
, tx
));
834 spa_feature_decr(dmu_objset_spa(mos
),
835 SPA_FEATURE_REDACTION_BOOKMARKS
, tx
);
838 avl_remove(&ds
->ds_bookmarks
, dbn
);
839 spa_strfree(dbn
->dbn_name
);
840 mutex_destroy(&dbn
->dbn_lock
);
841 kmem_free(dbn
, sizeof (*dbn
));
843 VERIFY0(zap_remove_norm(mos
, bmark_zapobj
, name
, mt
, tx
));
847 dsl_bookmark_destroy_check(void *arg
, dmu_tx_t
*tx
)
849 dsl_bookmark_destroy_arg_t
*dbda
= arg
;
850 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
853 ASSERT(nvlist_empty(dbda
->dbda_success
));
854 ASSERT(nvlist_empty(dbda
->dbda_errors
));
856 if (!spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_BOOKMARKS
))
859 for (nvpair_t
*pair
= nvlist_next_nvpair(dbda
->dbda_bmarks
, NULL
);
860 pair
!= NULL
; pair
= nvlist_next_nvpair(dbda
->dbda_bmarks
, pair
)) {
861 const char *fullname
= nvpair_name(pair
);
863 zfs_bookmark_phys_t bm
;
867 error
= dsl_bookmark_hold_ds(dp
, fullname
, &ds
,
869 if (error
== ENOENT
) {
870 /* ignore it; the bookmark is "already destroyed" */
874 error
= dsl_bookmark_lookup_impl(ds
, shortname
, &bm
);
875 dsl_dataset_rele(ds
, FTAG
);
876 if (error
== ESRCH
) {
878 * ignore it; the bookmark is
879 * "already destroyed"
883 if (error
== 0 && bm
.zbm_redaction_obj
!= 0) {
884 redaction_list_t
*rl
= NULL
;
885 error
= dsl_redaction_list_hold_obj(tx
->tx_pool
,
886 bm
.zbm_redaction_obj
, FTAG
, &rl
);
887 if (error
== ENOENT
) {
889 } else if (error
== 0 &&
890 dsl_redaction_list_long_held(rl
)) {
891 error
= SET_ERROR(EBUSY
);
894 dsl_redaction_list_rele(rl
, FTAG
);
899 if (dmu_tx_is_syncing(tx
)) {
900 fnvlist_add_boolean(dbda
->dbda_success
,
904 fnvlist_add_int32(dbda
->dbda_errors
, fullname
, error
);
912 dsl_bookmark_destroy_sync(void *arg
, dmu_tx_t
*tx
)
914 dsl_bookmark_destroy_arg_t
*dbda
= arg
;
915 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
916 objset_t
*mos
= dp
->dp_meta_objset
;
918 for (nvpair_t
*pair
= nvlist_next_nvpair(dbda
->dbda_success
, NULL
);
919 pair
!= NULL
; pair
= nvlist_next_nvpair(dbda
->dbda_success
, pair
)) {
924 VERIFY0(dsl_bookmark_hold_ds(dp
, nvpair_name(pair
),
925 &ds
, FTAG
, &shortname
));
926 dsl_bookmark_destroy_sync_impl(ds
, shortname
, tx
);
929 * If all of this dataset's bookmarks have been destroyed,
930 * free the zap object and decrement the feature's use count.
932 VERIFY0(zap_count(mos
, ds
->ds_bookmarks_obj
, &zap_cnt
));
934 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
935 VERIFY0(zap_destroy(mos
, ds
->ds_bookmarks_obj
, tx
));
936 ds
->ds_bookmarks_obj
= 0;
937 spa_feature_decr(dp
->dp_spa
, SPA_FEATURE_BOOKMARKS
, tx
);
938 VERIFY0(zap_remove(mos
, ds
->ds_object
,
939 DS_FIELD_BOOKMARK_NAMES
, tx
));
942 spa_history_log_internal_ds(ds
, "remove bookmark", tx
,
943 "name=%s", shortname
);
945 dsl_dataset_rele(ds
, FTAG
);
950 * The bookmarks must all be in the same pool.
953 dsl_bookmark_destroy(nvlist_t
*bmarks
, nvlist_t
*errors
)
956 dsl_bookmark_destroy_arg_t dbda
;
957 nvpair_t
*pair
= nvlist_next_nvpair(bmarks
, NULL
);
961 dbda
.dbda_bmarks
= bmarks
;
962 dbda
.dbda_errors
= errors
;
963 dbda
.dbda_success
= fnvlist_alloc();
965 rv
= dsl_sync_task(nvpair_name(pair
), dsl_bookmark_destroy_check
,
966 dsl_bookmark_destroy_sync
, &dbda
, fnvlist_num_pairs(bmarks
),
967 ZFS_SPACE_CHECK_RESERVED
);
968 fnvlist_free(dbda
.dbda_success
);
972 /* Return B_TRUE if there are any long holds on this dataset. */
974 dsl_redaction_list_long_held(redaction_list_t
*rl
)
976 return (!zfs_refcount_is_zero(&rl
->rl_longholds
));
980 dsl_redaction_list_long_hold(dsl_pool_t
*dp
, redaction_list_t
*rl
, void *tag
)
982 ASSERT(dsl_pool_config_held(dp
));
983 (void) zfs_refcount_add(&rl
->rl_longholds
, tag
);
987 dsl_redaction_list_long_rele(redaction_list_t
*rl
, void *tag
)
989 (void) zfs_refcount_remove(&rl
->rl_longholds
, tag
);
994 redaction_list_evict_sync(void *rlu
)
996 redaction_list_t
*rl
= rlu
;
997 zfs_refcount_destroy(&rl
->rl_longholds
);
999 kmem_free(rl
, sizeof (redaction_list_t
));
1003 dsl_redaction_list_rele(redaction_list_t
*rl
, void *tag
)
1005 dmu_buf_rele(rl
->rl_dbuf
, tag
);
1009 dsl_redaction_list_hold_obj(dsl_pool_t
*dp
, uint64_t rlobj
, void *tag
,
1010 redaction_list_t
**rlp
)
1012 objset_t
*mos
= dp
->dp_meta_objset
;
1014 redaction_list_t
*rl
;
1017 ASSERT(dsl_pool_config_held(dp
));
1019 err
= dmu_bonus_hold(mos
, rlobj
, tag
, &dbuf
);
1023 rl
= dmu_buf_get_user(dbuf
);
1025 redaction_list_t
*winner
= NULL
;
1027 rl
= kmem_zalloc(sizeof (redaction_list_t
), KM_SLEEP
);
1029 rl
->rl_object
= rlobj
;
1030 rl
->rl_phys
= dbuf
->db_data
;
1031 rl
->rl_mos
= dp
->dp_meta_objset
;
1032 zfs_refcount_create(&rl
->rl_longholds
);
1033 dmu_buf_init_user(&rl
->rl_dbu
, redaction_list_evict_sync
, NULL
,
1035 if ((winner
= dmu_buf_set_user_ie(dbuf
, &rl
->rl_dbu
)) != NULL
) {
1036 kmem_free(rl
, sizeof (*rl
));
1045 * Snapshot ds is being destroyed.
1047 * Adjust the "freed_before_next" of any bookmarks between this snap
1048 * and the previous snapshot, because their "next snapshot" is changing.
1050 * If there are any bookmarks with HAS_FBN at this snapshot, remove
1051 * their HAS_SNAP flag (note: there can be at most one snapshot of
1052 * each filesystem at a given txg), and return B_TRUE. In this case
1053 * the caller can not remove the key in the deadlist at this TXG, because
1054 * the HAS_FBN bookmarks require the key be there.
1056 * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1057 * snapshot's TXG. In this case the caller can remove the key in the
1058 * deadlist at this TXG.
1061 dsl_bookmark_ds_destroyed(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
1063 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1065 dsl_dataset_t
*head
, *next
;
1066 VERIFY0(dsl_dataset_hold_obj(dp
,
1067 dsl_dir_phys(ds
->ds_dir
)->dd_head_dataset_obj
, FTAG
, &head
));
1068 VERIFY0(dsl_dataset_hold_obj(dp
,
1069 dsl_dataset_phys(ds
)->ds_next_snap_obj
, FTAG
, &next
));
1072 * Find the first bookmark that HAS_FBN at or after the
1073 * previous snapshot.
1075 dsl_bookmark_node_t search
= { 0 };
1077 search
.dbn_phys
.zbm_creation_txg
=
1078 dsl_dataset_phys(ds
)->ds_prev_snap_txg
;
1079 search
.dbn_phys
.zbm_flags
= ZBM_FLAG_HAS_FBN
;
1081 * The empty-string name can't be in the AVL, and it compares
1082 * before any entries with this TXG.
1084 search
.dbn_name
= "";
1085 VERIFY3P(avl_find(&head
->ds_bookmarks
, &search
, &idx
), ==, NULL
);
1086 dsl_bookmark_node_t
*dbn
=
1087 avl_nearest(&head
->ds_bookmarks
, idx
, AVL_AFTER
);
1090 * Iterate over all bookmarks that are at or after the previous
1091 * snapshot, and before this (being deleted) snapshot. Adjust
1092 * their FBN based on their new next snapshot.
1094 for (; dbn
!= NULL
&& dbn
->dbn_phys
.zbm_creation_txg
<
1095 dsl_dataset_phys(ds
)->ds_creation_txg
;
1096 dbn
= AVL_NEXT(&head
->ds_bookmarks
, dbn
)) {
1097 if (!(dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
))
1100 * Increase our FBN by the amount of space that was live
1101 * (referenced) at the time of this bookmark (i.e.
1102 * birth <= zbm_creation_txg), and killed between this
1103 * (being deleted) snapshot and the next snapshot (i.e.
1104 * on the next snapshot's deadlist). (Space killed before
1105 * this are already on our FBN.)
1107 uint64_t referenced
, compressed
, uncompressed
;
1108 dsl_deadlist_space_range(&next
->ds_deadlist
,
1109 0, dbn
->dbn_phys
.zbm_creation_txg
,
1110 &referenced
, &compressed
, &uncompressed
);
1111 dbn
->dbn_phys
.zbm_referenced_freed_before_next_snap
+=
1113 dbn
->dbn_phys
.zbm_compressed_freed_before_next_snap
+=
1115 dbn
->dbn_phys
.zbm_uncompressed_freed_before_next_snap
+=
1117 VERIFY0(zap_update(dp
->dp_meta_objset
, head
->ds_bookmarks_obj
,
1118 dbn
->dbn_name
, sizeof (uint64_t),
1119 sizeof (zfs_bookmark_phys_t
) / sizeof (uint64_t),
1120 &dbn
->dbn_phys
, tx
));
1122 dsl_dataset_rele(next
, FTAG
);
1125 * There may be several bookmarks at this txg (the TXG of the
1126 * snapshot being deleted). We need to clear the SNAPSHOT_EXISTS
1127 * flag on all of them, and return TRUE if there is at least 1
1128 * bookmark here with HAS_FBN (thus preventing the deadlist
1129 * key from being removed).
1131 boolean_t rv
= B_FALSE
;
1132 for (; dbn
!= NULL
&& dbn
->dbn_phys
.zbm_creation_txg
==
1133 dsl_dataset_phys(ds
)->ds_creation_txg
;
1134 dbn
= AVL_NEXT(&head
->ds_bookmarks
, dbn
)) {
1135 if (!(dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
)) {
1136 ASSERT(!(dbn
->dbn_phys
.zbm_flags
&
1137 ZBM_FLAG_SNAPSHOT_EXISTS
));
1140 ASSERT(dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_SNAPSHOT_EXISTS
);
1141 dbn
->dbn_phys
.zbm_flags
&= ~ZBM_FLAG_SNAPSHOT_EXISTS
;
1142 VERIFY0(zap_update(dp
->dp_meta_objset
, head
->ds_bookmarks_obj
,
1143 dbn
->dbn_name
, sizeof (uint64_t),
1144 sizeof (zfs_bookmark_phys_t
) / sizeof (uint64_t),
1145 &dbn
->dbn_phys
, tx
));
1148 dsl_dataset_rele(head
, FTAG
);
1153 * A snapshot is being created of this (head) dataset.
1155 * We don't keep keys in the deadlist for the most recent snapshot, or any
1156 * bookmarks at or after it, because there can't be any blocks on the
1157 * deadlist in this range. Now that the most recent snapshot is after
1158 * all bookmarks, we need to add these keys. Note that the caller always
1159 * adds a key at the previous snapshot, so we only add keys for bookmarks
void
dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	/*
	 * Walk bookmarks from newest to oldest, stopping once we reach
	 * the previous snapshot's TXG (the caller always adds a deadlist
	 * key at the previous snapshot, so we only handle bookmarks
	 * after it).  Track the last TXG we added so that multiple
	 * bookmarks at the same TXG produce only one key.
	 */
	uint64_t last_key_added = UINT64_MAX;
	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
		uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
		/* Iterating newest-to-oldest, so TXGs are non-increasing. */
		ASSERT3U(creation_txg, <=, last_key_added);
		/*
		 * Note, there may be multiple bookmarks at this TXG,
		 * and we only want to add the key for this TXG once.
		 * The ds_bookmarks AVL is sorted by TXG, so we will visit
		 * these bookmarks in sequence.
		 */
		if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
		    creation_txg != last_key_added) {
			dsl_deadlist_add_key(&ds->ds_deadlist,
			    creation_txg, tx);
			last_key_added = creation_txg;
		}
	}
}
1188 * The next snapshot of the origin dataset has changed, due to
1189 * promote or clone swap. If there are any bookmarks at this dataset,
1190 * we need to update their zbm_*_freed_before_next_snap to reflect this.
1191 * The head dataset has the relevant bookmarks in ds_bookmarks.
void
dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);

	/*
	 * Find the first bookmark that HAS_FBN at the origin snapshot.
	 */
	dsl_bookmark_node_t search = { 0 };
	avl_index_t idx;
	search.dbn_phys.zbm_creation_txg =
	    dsl_dataset_phys(origin)->ds_creation_txg;
	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
	/*
	 * The empty-string name can't be in the AVL, and it compares
	 * before any entries with this TXG.
	 */
	search.dbn_name = "";
	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
	dsl_bookmark_node_t *dbn =
	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);

	/*
	 * Iterate over all bookmarks that are at the origin txg.
	 * Adjust their FBN based on their new next snapshot.
	 */
	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
	    dsl_dataset_phys(origin)->ds_creation_txg &&
	    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {

		/*
		 * Bookmark is at the origin, therefore its
		 * "next dataset" is changing, so we need
		 * to reset its FBN by recomputing it in
		 * dsl_bookmark_set_phys().
		 */
		ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
		    dsl_dataset_phys(origin)->ds_guid);
		ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
		    dsl_dataset_phys(origin)->ds_referenced_bytes);
		ASSERT(dbn->dbn_phys.zbm_flags &
		    ZBM_FLAG_SNAPSHOT_EXISTS);
		/*
		 * Save and restore the zbm_redaction_obj, which
		 * is zeroed by dsl_bookmark_set_phys().
		 */
		uint64_t redaction_obj =
		    dbn->dbn_phys.zbm_redaction_obj;
		dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
		dbn->dbn_phys.zbm_redaction_obj = redaction_obj;

		/* Persist the recomputed bookmark to the on-disk ZAP. */
		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
		    dbn->dbn_name, sizeof (uint64_t),
		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
		    &dbn->dbn_phys, tx));
	}
}
1254 * This block is no longer referenced by this (head) dataset.
1256 * Adjust the FBN of any bookmarks that reference this block, whose "next"
1257 * is the head dataset.
void
dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	/*
	 * Iterate over bookmarks whose "next" is the head dataset.
	 */
	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
		/*
		 * If the block was live (referenced) at the time of this
		 * bookmark, add its space to the bookmark's FBN.
		 */
		if (bp->blk_birth <= dbn->dbn_phys.zbm_creation_txg &&
		    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
			/* dbn_lock serializes concurrent FBN updates. */
			mutex_enter(&dbn->dbn_lock);
			dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
			    bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
			dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
			    BP_GET_PSIZE(bp);
			dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
			    BP_GET_UCSIZE(bp);
			/*
			 * Changing the ZAP object here would be too
			 * expensive.  Also, we may be called from the zio
			 * interrupt thread, which can't block on i/o.
			 * Therefore, we mark this bookmark as dirty and
			 * modify the ZAP once per txg, in
			 * dsl_bookmark_sync_done().
			 */
			dbn->dbn_dirty = B_TRUE;
			mutex_exit(&dbn->dbn_lock);
		}
	}
}
void
dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);

	/* Snapshots don't accumulate FBN updates; nothing to flush. */
	if (dsl_dataset_is_snapshot(ds))
		return;

	/*
	 * We only dirty bookmarks that are at or after the most recent
	 * snapshot.  We can't create snapshots between
	 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
	 * don't need to look at any bookmarks before ds_prev_snap_txg.
	 */
	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
		if (dbn->dbn_dirty) {
			/*
			 * We only dirty nodes with HAS_FBN, therefore
			 * we can always use the current bookmark struct size.
			 */
			ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
			VERIFY0(zap_update(dp->dp_meta_objset,
			    ds->ds_bookmarks_obj,
			    dbn->dbn_name, sizeof (uint64_t),
			    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
			    &dbn->dbn_phys, tx));
			dbn->dbn_dirty = B_FALSE;
		}
	}
	/* Debug check: after the flush, no bookmark should remain dirty. */
	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
		ASSERT(!dbn->dbn_dirty);
	}
}
1338 * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
uint64_t
dsl_bookmark_latest_txg(dsl_dataset_t *ds)
{
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
	/* The AVL is sorted by TXG, so the last node is the newest. */
	dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	if (dbn == NULL)
		return (0);
	return (dbn->dbn_phys.zbm_creation_txg);
}
1350 static inline unsigned int
1351 redact_block_buf_num_entries(unsigned int size
)
1353 return (size
/ sizeof (redact_block_phys_t
));
1357 * This function calculates the offset of the last entry in the array of
1358 * redact_block_phys_t. If we're reading the redaction list into buffers of
1359 * size bufsize, then for all but the last buffer, the last valid entry in the
1360 * array will be the last entry in the array. However, for the last buffer, any
1361 * amount of it may be filled. Thus, we check to see if we're looking at the
1362 * last buffer in the redaction list, and if so, we return the total number of
1363 * entries modulo the number of entries per buffer. Otherwise, we return the
1364 * number of entries per buffer minus one.
static inline unsigned int
last_entry(redaction_list_t *rl, unsigned int bufsize, uint64_t bufid)
{
	/*
	 * If bufid is the final (possibly partially-filled) buffer of the
	 * redaction list, return the index of its last valid entry;
	 * otherwise every slot is valid, so return the last slot index.
	 */
	if (bufid == (rl->rl_phys->rlp_num_entries - 1) /
	    redact_block_buf_num_entries(bufsize)) {
		return ((rl->rl_phys->rlp_num_entries - 1) %
		    redact_block_buf_num_entries(bufsize));
	}
	return (redact_block_buf_num_entries(bufsize) - 1);
}
1378 * Compare the redact_block_phys_t to the bookmark. If the last block in the
1379 * redact_block_phys_t is before the bookmark, return -1. If the first block in
1380 * the redact_block_phys_t is after the bookmark, return 1. Otherwise, the
1381 * bookmark is inside the range of the redact_block_phys_t, and we return 0.
static int
redact_block_zb_compare(redact_block_phys_t *first,
    zbookmark_phys_t *second)
{
	/*
	 * If the block_phys is for a previous object, or the last block in the
	 * block_phys is strictly before the block in the bookmark, the
	 * block_phys is earlier.
	 */
	if (first->rbp_object < second->zb_object ||
	    (first->rbp_object == second->zb_object &&
	    first->rbp_blkid + (redact_block_get_count(first) - 1) <
	    second->zb_blkid)) {
		return (-1);
	}

	/*
	 * If the bookmark is for a previous object, or the block in the
	 * bookmark is strictly before the first block in the block_phys, the
	 * bookmark is earlier.
	 */
	if (first->rbp_object > second->zb_object ||
	    (first->rbp_object == second->zb_object &&
	    first->rbp_blkid > second->zb_blkid)) {
		return (1);
	}

	/* The bookmark falls inside the range covered by the block_phys. */
	return (0);
}
1414 * Traverse the redaction list in the provided object, and call the callback for
1415 * each entry we find. Don't call the callback for any records before resume.
int
dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
    rl_traverse_callback_t cb, void *arg)
{
	objset_t *mos = rl->rl_mos;
	redact_block_phys_t *buf;
	unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
	int err = 0;

	if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
	    rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
		/*
		 * When we finish a send, we update the last object and offset
		 * to UINT64_MAX.  If a send fails partway through, the last
		 * object and offset will have some other value, indicating how
		 * far the send got. The redaction list must be complete before
		 * it can be traversed, so return EINVAL if the last object and
		 * blkid are not set to UINT64_MAX.
		 */
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Binary search for the point to resume from.  The goal is to minimize
	 * the number of disk reads we have to perform.
	 */
	buf = zio_data_buf_alloc(bufsize);
	uint64_t maxbufid = (rl->rl_phys->rlp_num_entries - 1) /
	    redact_block_buf_num_entries(bufsize);
	uint64_t minbufid = 0;
	while (resume != NULL && maxbufid - minbufid >= 1) {
		ASSERT3U(maxbufid, >, minbufid);
		uint64_t midbufid = minbufid + ((maxbufid - minbufid) / 2);
		err = dmu_read(mos, rl->rl_object, midbufid * bufsize, bufsize,
		    buf, DMU_READ_NO_PREFETCH);
		if (err != 0)
			break;

		int cmp0 = redact_block_zb_compare(&buf[0], resume);
		int cmpn = redact_block_zb_compare(
		    &buf[last_entry(rl, bufsize, maxbufid)], resume);

		/*
		 * If the first block is before or equal to the resume point,
		 * and the last one is equal or after, then the resume point is
		 * in this buf, and we should start here.
		 */
		if (cmp0 <= 0 && cmpn >= 0)
			break;

		if (cmp0 > 0)
			maxbufid = midbufid - 1;
		else if (cmpn < 0)
			minbufid = midbufid + 1;
		else
			panic("No progress in binary search for resume point");
	}

	for (uint64_t curidx = minbufid * redact_block_buf_num_entries(bufsize);
	    err == 0 && curidx < rl->rl_phys->rlp_num_entries;
	    curidx++) {
		/*
		 * We read in the redaction list one block at a time.  Once we
		 * finish with all the entries in a given block, we read in a
		 * new one.  The predictive prefetcher will take care of any
		 * prefetching, and this code shouldn't be the bottleneck, so we
		 * don't need to do manual prefetching.
		 */
		if (curidx % redact_block_buf_num_entries(bufsize) == 0) {
			err = dmu_read(mos, rl->rl_object, curidx *
			    sizeof (*buf), bufsize, buf,
			    DMU_READ_PREFETCH);
			if (err != 0)
				break;
		}
		redact_block_phys_t *rb = &buf[curidx %
		    redact_block_buf_num_entries(bufsize)];
		/*
		 * If resume is non-null, we should either not send the data, or
		 * null out resume so we don't have to keep doing these
		 * comparisons.
		 */
		if (resume != NULL) {
			if (redact_block_zb_compare(rb, resume) < 0) {
				/* Entirely before the resume point; skip. */
				continue;
			} else {
				/*
				 * If the place to resume is in the middle of
				 * the range described by this
				 * redact_block_phys, then modify the
				 * redact_block_phys in memory so we generate
				 * the right records.
				 */
				if (resume->zb_object == rb->rbp_object &&
				    resume->zb_blkid > rb->rbp_blkid) {
					uint64_t diff = resume->zb_blkid -
					    rb->rbp_blkid;
					rb->rbp_blkid = resume->zb_blkid;
					redact_block_set_count(rb,
					    redact_block_get_count(rb) - diff);
				}
				/* Past the resume point; stop comparing. */
				resume = NULL;
			}
		}

		/* A nonzero callback return terminates the traversal. */
		if (cb(rb, arg) != 0)
			break;
	}

	zio_data_buf_free(buf, bufsize);
	return (err);
}