module/zfs/dsl_bookmark.c
1 /*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14 */
15
16 /*
17 * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
18 * Copyright 2017 Nexenta Systems, Inc.
19 */
20
21 #include <sys/zfs_context.h>
22 #include <sys/dsl_dataset.h>
23 #include <sys/dsl_dir.h>
24 #include <sys/dsl_prop.h>
25 #include <sys/dsl_synctask.h>
26 #include <sys/dsl_destroy.h>
27 #include <sys/dmu_impl.h>
28 #include <sys/dmu_tx.h>
29 #include <sys/arc.h>
30 #include <sys/zap.h>
31 #include <sys/zfeature.h>
32 #include <sys/spa.h>
33 #include <sys/dsl_bookmark.h>
34 #include <zfs_namecheck.h>
35 #include <sys/dmu_send.h>
36
37 static int
38 dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
39 dsl_dataset_t **dsp, void *tag, char **shortnamep)
40 {
41 char buf[ZFS_MAX_DATASET_NAME_LEN];
42 char *hashp;
43
44 if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
45 return (SET_ERROR(ENAMETOOLONG));
46 hashp = strchr(fullname, '#');
47 if (hashp == NULL)
48 return (SET_ERROR(EINVAL));
49
50 *shortnamep = hashp + 1;
51 if (zfs_component_namecheck(*shortnamep, NULL, NULL))
52 return (SET_ERROR(EINVAL));
53 (void) strlcpy(buf, fullname, hashp - fullname + 1);
54 return (dsl_dataset_hold(dp, buf, tag, dsp));
55 }
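
/*
 * Editorial sketch (not part of the original file): how the helper above is
 * typically used.  A full bookmark name such as "tank/fs#monday" is split at
 * the '#': the dataset "tank/fs" is held and *shortnamep is left pointing at
 * "monday" inside the caller's string.  The hypothetical example below
 * resolves a full name and logs the bookmark's creation txg;
 * dsl_bookmark_lookup_impl() is declared in sys/dsl_bookmark.h, so it can be
 * called here ahead of its definition below.
 */
static int __attribute__((unused))
example_log_bookmark_txg(dsl_pool_t *dp, const char *fullname)
{
        dsl_dataset_t *ds;
        char *shortname;
        zfs_bookmark_phys_t bm;
        int error;

        error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
        if (error != 0)
                return (error);
        error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
        if (error == 0) {
                zfs_dbgmsg("bookmark %s created at txg %llu", fullname,
                    (u_longlong_t)bm.zbm_creation_txg);
        }
        dsl_dataset_rele(ds, FTAG);
        return (error);
}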
56
57 /*
58 * Returns ESRCH if bookmark is not found.
59 * Note, we need to use the ZAP rather than the AVL to look up bookmarks
60 * by name, because only the ZAP honors the casesensitivity setting.
61 */
62 int
63 dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
64 zfs_bookmark_phys_t *bmark_phys)
65 {
66 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
67 uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
68 matchtype_t mt = 0;
69 int err;
70
71 if (bmark_zapobj == 0)
72 return (SET_ERROR(ESRCH));
73
74 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
75 mt = MT_NORMALIZE;
76
77 /*
78 * Zero out the bookmark in case the one stored on disk
79 * is in an older, shorter format.
80 */
81 bzero(bmark_phys, sizeof (*bmark_phys));
82
83 err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
84 sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
85 NULL);
86
87 return (err == ENOENT ? ESRCH : err);
88 }
89
90 /*
91  * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
92  * does not represent an earlier point in later_ds's timeline. However,
93 * bmp will still be filled in if we return EXDEV.
94 *
95 * Returns ENOENT if the dataset containing the bookmark does not exist.
96 * Returns ESRCH if the dataset exists but the bookmark was not found in it.
97 */
98 int
99 dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
100 dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
101 {
102 char *shortname;
103 dsl_dataset_t *ds;
104 int error;
105
106 error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
107 if (error != 0)
108 return (error);
109
110 error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
111 if (error == 0 && later_ds != NULL) {
112 if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
113 error = SET_ERROR(EXDEV);
114 }
115 dsl_dataset_rele(ds, FTAG);
116 return (error);
117 }
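
/*
 * Editorial sketch (not part of the original file): a hypothetical caller
 * relying on the EXDEV semantics above to confirm that a bookmark lies on a
 * snapshot's timeline and is therefore usable as an incremental source; the
 * send code performs a check of roughly this shape.
 */
static int __attribute__((unused))
example_check_incremental_source(dsl_pool_t *dp, const char *bmark_name,
    dsl_dataset_t *tosnap)
{
        zfs_bookmark_phys_t bm;
        int error;

        error = dsl_bookmark_lookup(dp, bmark_name, tosnap, &bm);
        if (error == EXDEV) {
                /*
                 * The bookmark exists but does not lie on tosnap's
                 * timeline; bm is still valid per the comment above.
                 */
                zfs_dbgmsg("%s (guid %llu) is not an ancestor of the "
                    "target snapshot", bmark_name,
                    (u_longlong_t)bm.zbm_guid);
        }
        return (error);
}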
118
119 typedef struct dsl_bookmark_create_redacted_arg {
120 const char *dbcra_bmark;
121 const char *dbcra_snap;
122 redaction_list_t **dbcra_rl;
123 uint64_t dbcra_numsnaps;
124 uint64_t *dbcra_snaps;
125 void *dbcra_tag;
126 } dsl_bookmark_create_redacted_arg_t;
127
128 typedef struct dsl_bookmark_create_arg {
129 nvlist_t *dbca_bmarks;
130 nvlist_t *dbca_errors;
131 } dsl_bookmark_create_arg_t;
132
133 static int
134 dsl_bookmark_create_check_impl(dsl_dataset_t *snapds, const char *bookmark_name,
135 dmu_tx_t *tx)
136 {
137 dsl_pool_t *dp = dmu_tx_pool(tx);
138 dsl_dataset_t *bmark_fs;
139 char *shortname;
140 int error;
141 zfs_bookmark_phys_t bmark_phys = { 0 };
142
143 if (!snapds->ds_is_snapshot)
144 return (SET_ERROR(EINVAL));
145
146 error = dsl_bookmark_hold_ds(dp, bookmark_name,
147 &bmark_fs, FTAG, &shortname);
148 if (error != 0)
149 return (error);
150
151 if (!dsl_dataset_is_before(bmark_fs, snapds, 0)) {
152 dsl_dataset_rele(bmark_fs, FTAG);
153 return (SET_ERROR(EINVAL));
154 }
155
156 error = dsl_bookmark_lookup_impl(bmark_fs, shortname,
157 &bmark_phys);
158 dsl_dataset_rele(bmark_fs, FTAG);
159 if (error == 0)
160 return (SET_ERROR(EEXIST));
161 if (error == ESRCH)
162 return (0);
163 return (error);
164 }
165
166 static int
167 dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
168 {
169 dsl_bookmark_create_arg_t *dbca = arg;
170 dsl_pool_t *dp = dmu_tx_pool(tx);
171 int rv = 0;
172
173 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
174 return (SET_ERROR(ENOTSUP));
175
176 for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
177 pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
178 dsl_dataset_t *snapds;
179 int error;
180
181 /* note: validity of nvlist checked by ioctl layer */
182 error = dsl_dataset_hold(dp, fnvpair_value_string(pair),
183 FTAG, &snapds);
184 if (error == 0) {
185 error = dsl_bookmark_create_check_impl(snapds,
186 nvpair_name(pair), tx);
187 dsl_dataset_rele(snapds, FTAG);
188 }
189 if (error != 0) {
190 fnvlist_add_int32(dbca->dbca_errors,
191 nvpair_name(pair), error);
192 rv = error;
193 }
194 }
195
196 return (rv);
197 }
198
199 static dsl_bookmark_node_t *
200 dsl_bookmark_node_alloc(char *shortname)
201 {
202 dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
203 dbn->dbn_name = spa_strdup(shortname);
204 dbn->dbn_dirty = B_FALSE;
205 mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
206 return (dbn);
207 }
208
209 /*
210 * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
211 */
212 static void
213 dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
214 {
215 spa_t *spa = dsl_dataset_get_spa(snap);
216 objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
217 dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
218 zbm->zbm_guid = dsp->ds_guid;
219 zbm->zbm_creation_txg = dsp->ds_creation_txg;
220 zbm->zbm_creation_time = dsp->ds_creation_time;
221 zbm->zbm_redaction_obj = 0;
222
223 /*
224 * If the dataset is encrypted create a larger bookmark to
225 * accommodate the IVset guid. The IVset guid was added
226 * after the encryption feature to prevent a problem with
227 * raw sends. If we encounter an encrypted dataset without
228 * an IVset guid we fall back to a normal bookmark.
229 */
230 if (snap->ds_dir->dd_crypto_obj != 0 &&
231 spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
232 (void) zap_lookup(mos, snap->ds_object,
233 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
234 &zbm->zbm_ivset_guid);
235 }
236
237 if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
238 zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
239 zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
240 zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
241 zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
242
243 dsl_dataset_t *nextds;
244 VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
245 dsp->ds_next_snap_obj, FTAG, &nextds));
246 dsl_deadlist_space(&nextds->ds_deadlist,
247 &zbm->zbm_referenced_freed_before_next_snap,
248 &zbm->zbm_compressed_freed_before_next_snap,
249 &zbm->zbm_uncompressed_freed_before_next_snap);
250 dsl_dataset_rele(nextds, FTAG);
251 } else {
252 bzero(&zbm->zbm_flags,
253 sizeof (zfs_bookmark_phys_t) -
254 offsetof(zfs_bookmark_phys_t, zbm_flags));
255 }
256 }
257
258 void
259 dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
260 dmu_tx_t *tx)
261 {
262 dsl_pool_t *dp = dmu_tx_pool(tx);
263 objset_t *mos = dp->dp_meta_objset;
264
265 if (hds->ds_bookmarks_obj == 0) {
266 hds->ds_bookmarks_obj = zap_create_norm(mos,
267 U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
268 tx);
269 spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
270
271 dsl_dataset_zapify(hds, tx);
272 VERIFY0(zap_add(mos, hds->ds_object,
273 DS_FIELD_BOOKMARK_NAMES,
274 sizeof (hds->ds_bookmarks_obj), 1,
275 &hds->ds_bookmarks_obj, tx));
276 }
277
278 avl_add(&hds->ds_bookmarks, dbn);
279
280 /*
281 * To maintain backwards compatibility with software that doesn't
282 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
283 * possible bookmark size.
284 */
285 uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
286 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
287 (dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
288 ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
289 bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
290 spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
291 }
292
293 __attribute__((unused)) zfs_bookmark_phys_t zero_phys = { 0 };
294 ASSERT0(bcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
295 &zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
296
297 VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
298 sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
299 &dbn->dbn_phys, tx));
300 }
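
/*
 * Editorial note (not part of the original file): for a bookmark of an
 * unencrypted, non-redacted snapshot on a pool without
 * SPA_FEATURE_BOOKMARK_WRITTEN, only BOOKMARK_PHYS_SIZE_V1 -- the leading
 * zbm_guid, zbm_creation_txg and zbm_creation_time words -- is written to
 * the ZAP, so software that only understands v1 bookmarks can still read it.
 * The ASSERT0(bcmp(...)) above verifies that everything past the chosen size
 * is in fact zero before the tail is dropped.
 */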
301
302 /*
303 * If redaction_list is non-null, we create a redacted bookmark and redaction
304 * list, and store the object number of the redaction list in redact_obj.
305 */
306 static void
307 dsl_bookmark_create_sync_impl(const char *bookmark, const char *snapshot,
308 dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps, void *tag,
309 redaction_list_t **redaction_list)
310 {
311 dsl_pool_t *dp = dmu_tx_pool(tx);
312 objset_t *mos = dp->dp_meta_objset;
313 dsl_dataset_t *snapds, *bmark_fs;
314 char *shortname;
315 boolean_t bookmark_redacted;
316 uint64_t *dsredactsnaps;
317 uint64_t dsnumsnaps;
318
319 VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
320 VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
321 &shortname));
322
323 dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
324 dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
325
326 bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
327 SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
328 if (redaction_list != NULL || bookmark_redacted) {
329 redaction_list_t *local_rl;
330 if (bookmark_redacted) {
331 redact_snaps = dsredactsnaps;
332 num_redact_snaps = dsnumsnaps;
333 }
334 dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
335 DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
336 DMU_OTN_UINT64_METADATA, sizeof (redaction_list_phys_t) +
337 num_redact_snaps * sizeof (uint64_t), tx);
338 spa_feature_incr(dp->dp_spa,
339 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
340
341 VERIFY0(dsl_redaction_list_hold_obj(dp,
342 dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
343 dsl_redaction_list_long_hold(dp, local_rl, tag);
344
345 ASSERT3U((local_rl)->rl_dbuf->db_size, >=,
346 sizeof (redaction_list_phys_t) + num_redact_snaps *
347 sizeof (uint64_t));
348 dmu_buf_will_dirty(local_rl->rl_dbuf, tx);
349 bcopy(redact_snaps, local_rl->rl_phys->rlp_snaps,
350 sizeof (uint64_t) * num_redact_snaps);
351 local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
352 if (bookmark_redacted) {
353 ASSERT3P(redaction_list, ==, NULL);
354 local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
355 local_rl->rl_phys->rlp_last_object = UINT64_MAX;
356 dsl_redaction_list_long_rele(local_rl, tag);
357 dsl_redaction_list_rele(local_rl, tag);
358 } else {
359 *redaction_list = local_rl;
360 }
361 }
362
363 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
364 spa_feature_incr(dp->dp_spa,
365 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
366 }
367
368 dsl_bookmark_node_add(bmark_fs, dbn, tx);
369
370 spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
371 "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
372 shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
373 (longlong_t)snapds->ds_object,
374 (longlong_t)dbn->dbn_phys.zbm_redaction_obj);
375
376 dsl_dataset_rele(bmark_fs, FTAG);
377 dsl_dataset_rele(snapds, FTAG);
378 }
379
380 static void
381 dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
382 {
383 dsl_bookmark_create_arg_t *dbca = arg;
384
385 ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
386 SPA_FEATURE_BOOKMARKS));
387
388 for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
389 pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
390 dsl_bookmark_create_sync_impl(nvpair_name(pair),
391 fnvpair_value_string(pair), tx, 0, NULL, NULL, NULL);
392 }
393 }
394
395 /*
396 * The bookmarks must all be in the same pool.
397 */
398 int
399 dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
400 {
401 nvpair_t *pair;
402 dsl_bookmark_create_arg_t dbca;
403
404 pair = nvlist_next_nvpair(bmarks, NULL);
405 if (pair == NULL)
406 return (0);
407
408 dbca.dbca_bmarks = bmarks;
409 dbca.dbca_errors = errors;
410
411 return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
412 dsl_bookmark_create_sync, &dbca,
413 fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
414 }
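
/*
 * Editorial sketch (not part of the original file): a hypothetical in-kernel
 * caller of dsl_bookmark_create().  Each nvpair maps a new bookmark name to
 * the snapshot it should point at; in practice this path is reached from the
 * bookmark ioctl with an nvlist assembled in userland.
 */
static int __attribute__((unused))
example_bookmark_snapshot(void)
{
        nvlist_t *bmarks = fnvlist_alloc();
        nvlist_t *errors = fnvlist_alloc();
        int error;

        /* create tank/fs#monday pointing at tank/fs@monday */
        fnvlist_add_string(bmarks, "tank/fs#monday", "tank/fs@monday");
        error = dsl_bookmark_create(bmarks, errors);
        /* on failure, per-bookmark errnos are reported in 'errors' */
        fnvlist_free(bmarks);
        fnvlist_free(errors);
        return (error);
}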
415
416 static int
417 dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
418 {
419 dsl_bookmark_create_redacted_arg_t *dbcra = arg;
420 dsl_pool_t *dp = dmu_tx_pool(tx);
421 dsl_dataset_t *snapds;
422 int rv = 0;
423
424 if (!spa_feature_is_enabled(dp->dp_spa,
425 SPA_FEATURE_REDACTION_BOOKMARKS))
426 return (SET_ERROR(ENOTSUP));
427 /*
428 * If the list of redact snaps will not fit in the bonus buffer with
429 * the furthest reached object and offset, fail.
430 */
431 if (dbcra->dbcra_numsnaps > (dmu_bonus_max() -
432 sizeof (redaction_list_phys_t)) / sizeof (uint64_t))
433 return (SET_ERROR(E2BIG));
434
435 rv = dsl_dataset_hold(dp, dbcra->dbcra_snap,
436 FTAG, &snapds);
437 if (rv == 0) {
438 rv = dsl_bookmark_create_check_impl(snapds, dbcra->dbcra_bmark,
439 tx);
440 dsl_dataset_rele(snapds, FTAG);
441 }
442 return (rv);
443 }
444
445 static void
446 dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
447 {
448 dsl_bookmark_create_redacted_arg_t *dbcra = arg;
449 dsl_bookmark_create_sync_impl(dbcra->dbcra_bmark, dbcra->dbcra_snap, tx,
450 dbcra->dbcra_numsnaps, dbcra->dbcra_snaps, dbcra->dbcra_tag,
451 dbcra->dbcra_rl);
452 }
453
454 int
455 dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
456 uint64_t numsnaps, uint64_t *snapguids, void *tag, redaction_list_t **rl)
457 {
458 dsl_bookmark_create_redacted_arg_t dbcra;
459
460 dbcra.dbcra_bmark = bookmark;
461 dbcra.dbcra_snap = snapshot;
462 dbcra.dbcra_rl = rl;
463 dbcra.dbcra_numsnaps = numsnaps;
464 dbcra.dbcra_snaps = snapguids;
465 dbcra.dbcra_tag = tag;
466
467 return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
468 dsl_bookmark_create_redacted_sync, &dbcra, 5,
469 ZFS_SPACE_CHECK_NORMAL));
470 }
471
472 /*
473 * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
474 * If 'props' is NULL, retrieves all properties.
475 */
476 static void
477 dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
478 nvlist_t *props, nvlist_t *out_props)
479 {
480 ASSERT3P(dp, !=, NULL);
481 ASSERT3P(bmark_phys, !=, NULL);
482 ASSERT3P(out_props, !=, NULL);
483 ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
484
485 if (props == NULL || nvlist_exists(props,
486 zfs_prop_to_name(ZFS_PROP_GUID))) {
487 dsl_prop_nvlist_add_uint64(out_props,
488 ZFS_PROP_GUID, bmark_phys->zbm_guid);
489 }
490 if (props == NULL || nvlist_exists(props,
491 zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
492 dsl_prop_nvlist_add_uint64(out_props,
493 ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
494 }
495 if (props == NULL || nvlist_exists(props,
496 zfs_prop_to_name(ZFS_PROP_CREATION))) {
497 dsl_prop_nvlist_add_uint64(out_props,
498 ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
499 }
500 if (props == NULL || nvlist_exists(props,
501 zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
502 dsl_prop_nvlist_add_uint64(out_props,
503 ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
504 }
505 if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
506 if (props == NULL || nvlist_exists(props,
507 zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
508 dsl_prop_nvlist_add_uint64(out_props,
509 ZFS_PROP_REFERENCED,
510 bmark_phys->zbm_referenced_bytes_refd);
511 }
512 if (props == NULL || nvlist_exists(props,
513 zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
514 dsl_prop_nvlist_add_uint64(out_props,
515 ZFS_PROP_LOGICALREFERENCED,
516 bmark_phys->zbm_uncompressed_bytes_refd);
517 }
518 if (props == NULL || nvlist_exists(props,
519 zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
520 uint64_t ratio =
521 bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
522 bmark_phys->zbm_uncompressed_bytes_refd * 100 /
523 bmark_phys->zbm_compressed_bytes_refd;
524 dsl_prop_nvlist_add_uint64(out_props,
525 ZFS_PROP_REFRATIO, ratio);
526 }
527 }
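        /*
         * Editorial note (not part of the original file): like the pool-wide
         * ratio properties, refratio is stored scaled by 100.  For example,
         * a bookmark that logically referenced 6 GiB but only 2 GiB after
         * compression yields 6 * 100 / 2 = 300, which userland prints as
         * "3.00x".
         */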
528
529 if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
530 nvlist_exists(props, "redact_complete")) &&
531 bmark_phys->zbm_redaction_obj != 0) {
532 redaction_list_t *rl;
533 int err = dsl_redaction_list_hold_obj(dp,
534 bmark_phys->zbm_redaction_obj, FTAG, &rl);
535 if (err == 0) {
536 if (nvlist_exists(props, "redact_snaps")) {
537 nvlist_t *nvl;
538 nvl = fnvlist_alloc();
539 fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
540 rl->rl_phys->rlp_snaps,
541 rl->rl_phys->rlp_num_snaps);
542 fnvlist_add_nvlist(out_props, "redact_snaps",
543 nvl);
544 nvlist_free(nvl);
545 }
546 if (nvlist_exists(props, "redact_complete")) {
547 nvlist_t *nvl;
548 nvl = fnvlist_alloc();
549 fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
550 rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
551 rl->rl_phys->rlp_last_object == UINT64_MAX);
552 fnvlist_add_nvlist(out_props, "redact_complete",
553 nvl);
554 nvlist_free(nvl);
555 }
556 dsl_redaction_list_rele(rl, FTAG);
557 }
558 }
559 }
560
561 int
562 dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
563 {
564 dsl_pool_t *dp = ds->ds_dir->dd_pool;
565
566 ASSERT(dsl_pool_config_held(dp));
567
568 if (dsl_dataset_is_snapshot(ds))
569 return (SET_ERROR(EINVAL));
570
571 for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
572 dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
573 nvlist_t *out_props = fnvlist_alloc();
574
575 dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
576
577 fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
578 fnvlist_free(out_props);
579 }
580 return (0);
581 }
582
583 /*
584 * Comparison func for ds_bookmarks AVL tree. We sort the bookmarks by
585 * their TXG, then by their FBN-ness. The "FBN-ness" component ensures
586 * that all bookmarks at the same TXG that HAS_FBN are adjacent, which
587 * dsl_bookmark_destroy_sync_impl() depends on. Note that there may be
588 * multiple bookmarks at the same TXG (with the same FBN-ness). In this
589 * case we differentiate them by an arbitrary metric (in this case,
590 * their names).
591 */
592 static int
593 dsl_bookmark_compare(const void *l, const void *r)
594 {
595 const dsl_bookmark_node_t *ldbn = l;
596 const dsl_bookmark_node_t *rdbn = r;
597
598 int64_t cmp = AVL_CMP(ldbn->dbn_phys.zbm_creation_txg,
599 rdbn->dbn_phys.zbm_creation_txg);
600 if (likely(cmp))
601 return (cmp);
602 cmp = AVL_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
603 (rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
604 if (likely(cmp))
605 return (cmp);
606 cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
607 return (AVL_ISIGN(cmp));
608 }
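
/*
 * Editorial example (not part of the original file): given bookmarks
 * (txg 90, HAS_FBN, "z"), (txg 100, no FBN, "b"), (txg 100, HAS_FBN, "a") and
 * (txg 100, HAS_FBN, "c"), the tree orders them as
 *   (90, FBN, "z"), (100, no FBN, "b"), (100, FBN, "a"), (100, FBN, "c"):
 * ascending TXG, then non-FBN before FBN, then by name, which keeps the
 * HAS_FBN bookmarks of a given TXG adjacent as required above.
 */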
609
610 /*
611 * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
612 */
613 int
614 dsl_bookmark_init_ds(dsl_dataset_t *ds)
615 {
616 dsl_pool_t *dp = ds->ds_dir->dd_pool;
617 objset_t *mos = dp->dp_meta_objset;
618
619 ASSERT(!ds->ds_is_snapshot);
620
621 avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
622 sizeof (dsl_bookmark_node_t),
623 offsetof(dsl_bookmark_node_t, dbn_node));
624
625 if (!dsl_dataset_is_zapified(ds))
626 return (0);
627
628 int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
629 sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
630 if (zaperr == ENOENT)
631 return (0);
632 if (zaperr != 0)
633 return (zaperr);
634
635 if (ds->ds_bookmarks_obj == 0)
636 return (0);
637
638 int err = 0;
639 zap_cursor_t zc;
640 zap_attribute_t attr;
641
642 for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
643 (err = zap_cursor_retrieve(&zc, &attr)) == 0;
644 zap_cursor_advance(&zc)) {
645 dsl_bookmark_node_t *dbn =
646 dsl_bookmark_node_alloc(attr.za_name);
647
648 err = dsl_bookmark_lookup_impl(ds,
649 dbn->dbn_name, &dbn->dbn_phys);
650 ASSERT3U(err, !=, ENOENT);
651 if (err != 0) {
652 kmem_free(dbn, sizeof (*dbn));
653 break;
654 }
655 avl_add(&ds->ds_bookmarks, dbn);
656 }
657 zap_cursor_fini(&zc);
658 if (err == ENOENT)
659 err = 0;
660 return (err);
661 }
662
663 void
664 dsl_bookmark_fini_ds(dsl_dataset_t *ds)
665 {
666 void *cookie = NULL;
667 dsl_bookmark_node_t *dbn;
668
669 if (ds->ds_is_snapshot)
670 return;
671
672 while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
673 spa_strfree(dbn->dbn_name);
674 mutex_destroy(&dbn->dbn_lock);
675 kmem_free(dbn, sizeof (*dbn));
676 }
677 avl_destroy(&ds->ds_bookmarks);
678 }
679
680 /*
681 * Retrieve the bookmarks that exist in the specified dataset, and the
682 * requested properties of each bookmark.
683 *
684 * The "props" nvlist specifies which properties are requested.
685 * See lzc_get_bookmarks() for the list of valid properties.
686 */
687 int
688 dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
689 {
690 dsl_pool_t *dp;
691 dsl_dataset_t *ds;
692 int err;
693
694 err = dsl_pool_hold(dsname, FTAG, &dp);
695 if (err != 0)
696 return (err);
697 err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
698 if (err != 0) {
699 dsl_pool_rele(dp, FTAG);
700 return (err);
701 }
702
703 err = dsl_get_bookmarks_impl(ds, props, outnvl);
704
705 dsl_dataset_rele(ds, FTAG);
706 dsl_pool_rele(dp, FTAG);
707 return (err);
708 }
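
/*
 * Editorial sketch (not part of the original file): a hypothetical caller
 * requesting only the guid and createtxg of every bookmark on a filesystem.
 * Each requested property is passed as a name-only nvpair, similar to the
 * props nvlist that lzc_get_bookmarks() documents.
 */
static int __attribute__((unused))
example_list_bookmark_guids(nvlist_t *outnvl)
{
        nvlist_t *props = fnvlist_alloc();
        int error;

        fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_GUID));
        fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_CREATETXG));
        error = dsl_get_bookmarks("tank/fs", props, outnvl);
        fnvlist_free(props);
        return (error);
}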
709
710 /*
711 * Retrieve all properties for a single bookmark in the given dataset.
712 */
713 int
714 dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
715 {
716 dsl_pool_t *dp;
717 dsl_dataset_t *ds;
718 zfs_bookmark_phys_t bmark_phys = { 0 };
719 int err;
720
721 err = dsl_pool_hold(dsname, FTAG, &dp);
722 if (err != 0)
723 return (err);
724 err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
725 if (err != 0) {
726 dsl_pool_rele(dp, FTAG);
727 return (err);
728 }
729
730 err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
731 if (err != 0)
732 goto out;
733
734 dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
735 out:
736 dsl_dataset_rele(ds, FTAG);
737 dsl_pool_rele(dp, FTAG);
738 return (err);
739 }
740
741 typedef struct dsl_bookmark_destroy_arg {
742 nvlist_t *dbda_bmarks;
743 nvlist_t *dbda_success;
744 nvlist_t *dbda_errors;
745 } dsl_bookmark_destroy_arg_t;
746
747 static void
748 dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
749 dmu_tx_t *tx)
750 {
751 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
752 uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
753 matchtype_t mt = 0;
754 uint64_t int_size, num_ints;
755 /*
756  * 'search' must be zeroed so that dbn_phys.zbm_flags (which is used in
757  * dsl_bookmark_compare()) will be zeroed even if the on-disk (in ZAP)
758  * bookmark is shorter than offsetof(zfs_bookmark_phys_t, zbm_flags).
759 */
760 dsl_bookmark_node_t search = { 0 };
761 char realname[ZFS_MAX_DATASET_NAME_LEN];
762
763 /*
764 * Find the real name of this bookmark, which may be different
765 * from the given name if the dataset is case-insensitive. Then
766 * use the real name to find the node in the ds_bookmarks AVL tree.
767 */
768
769 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
770 mt = MT_NORMALIZE;
771
772 VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
773
774 ASSERT3U(int_size, ==, sizeof (uint64_t));
775
776 if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
777 spa_feature_decr(dmu_objset_spa(mos),
778 SPA_FEATURE_BOOKMARK_V2, tx);
779 }
780 VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
781 num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
782
783 search.dbn_name = realname;
784 dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
785 ASSERT(dbn != NULL);
786
787 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
788 /*
789 * If this bookmark HAS_FBN, and it is before the most
790 * recent snapshot, then its TXG is a key in the head's
791 * deadlist (and all clones' heads' deadlists). If this is
792 * the last thing keeping the key (i.e. there are no more
793 * bookmarks with HAS_FBN at this TXG, and there is no
794 * snapshot at this TXG), then remove the key.
795 *
796 * Note that this algorithm depends on ds_bookmarks being
797 * sorted such that all bookmarks at the same TXG with
798 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
799 * at the same TXG in between them). If this were not
800 * the case, we would need to examine *all* bookmarks
801 * at this TXG, rather than just the adjacent ones.
802 */
803
804 dsl_bookmark_node_t *dbn_prev =
805 AVL_PREV(&ds->ds_bookmarks, dbn);
806 dsl_bookmark_node_t *dbn_next =
807 AVL_NEXT(&ds->ds_bookmarks, dbn);
808
809 boolean_t more_bookmarks_at_this_txg =
810 (dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
811 dbn->dbn_phys.zbm_creation_txg &&
812 (dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
813 (dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
814 dbn->dbn_phys.zbm_creation_txg &&
815 (dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
816
817 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
818 !more_bookmarks_at_this_txg &&
819 dbn->dbn_phys.zbm_creation_txg <
820 dsl_dataset_phys(ds)->ds_prev_snap_txg) {
821 dsl_dir_remove_clones_key(ds->ds_dir,
822 dbn->dbn_phys.zbm_creation_txg, tx);
823 dsl_deadlist_remove_key(&ds->ds_deadlist,
824 dbn->dbn_phys.zbm_creation_txg, tx);
825 }
826
827 spa_feature_decr(dmu_objset_spa(mos),
828 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
829 }
830
831 if (dbn->dbn_phys.zbm_redaction_obj != 0) {
832 VERIFY0(dmu_object_free(mos,
833 dbn->dbn_phys.zbm_redaction_obj, tx));
834 spa_feature_decr(dmu_objset_spa(mos),
835 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
836 }
837
838 avl_remove(&ds->ds_bookmarks, dbn);
839 spa_strfree(dbn->dbn_name);
840 mutex_destroy(&dbn->dbn_lock);
841 kmem_free(dbn, sizeof (*dbn));
842
843 VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
844 }
845
846 static int
847 dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
848 {
849 dsl_bookmark_destroy_arg_t *dbda = arg;
850 dsl_pool_t *dp = dmu_tx_pool(tx);
851 int rv = 0;
852
853 ASSERT(nvlist_empty(dbda->dbda_success));
854 ASSERT(nvlist_empty(dbda->dbda_errors));
855
856 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
857 return (0);
858
859 for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
860 pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
861 const char *fullname = nvpair_name(pair);
862 dsl_dataset_t *ds;
863 zfs_bookmark_phys_t bm;
864 int error;
865 char *shortname;
866
867 error = dsl_bookmark_hold_ds(dp, fullname, &ds,
868 FTAG, &shortname);
869 if (error == ENOENT) {
870 /* ignore it; the bookmark is "already destroyed" */
871 continue;
872 }
873 if (error == 0) {
874 error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
875 dsl_dataset_rele(ds, FTAG);
876 if (error == ESRCH) {
877 /*
878 * ignore it; the bookmark is
879 * "already destroyed"
880 */
881 continue;
882 }
883 if (error == 0 && bm.zbm_redaction_obj != 0) {
884 redaction_list_t *rl = NULL;
885 error = dsl_redaction_list_hold_obj(tx->tx_pool,
886 bm.zbm_redaction_obj, FTAG, &rl);
887 if (error == ENOENT) {
888 error = 0;
889 } else if (error == 0 &&
890 dsl_redaction_list_long_held(rl)) {
891 error = SET_ERROR(EBUSY);
892 }
893 if (rl != NULL) {
894 dsl_redaction_list_rele(rl, FTAG);
895 }
896 }
897 }
898 if (error == 0) {
899 if (dmu_tx_is_syncing(tx)) {
900 fnvlist_add_boolean(dbda->dbda_success,
901 fullname);
902 }
903 } else {
904 fnvlist_add_int32(dbda->dbda_errors, fullname, error);
905 rv = error;
906 }
907 }
908 return (rv);
909 }
910
911 static void
912 dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
913 {
914 dsl_bookmark_destroy_arg_t *dbda = arg;
915 dsl_pool_t *dp = dmu_tx_pool(tx);
916 objset_t *mos = dp->dp_meta_objset;
917
918 for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
919 pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
920 dsl_dataset_t *ds;
921 char *shortname;
922 uint64_t zap_cnt;
923
924 VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
925 &ds, FTAG, &shortname));
926 dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
927
928 /*
929 * If all of this dataset's bookmarks have been destroyed,
930 * free the zap object and decrement the feature's use count.
931 */
932 VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
933 if (zap_cnt == 0) {
934 dmu_buf_will_dirty(ds->ds_dbuf, tx);
935 VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
936 ds->ds_bookmarks_obj = 0;
937 spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
938 VERIFY0(zap_remove(mos, ds->ds_object,
939 DS_FIELD_BOOKMARK_NAMES, tx));
940 }
941
942 spa_history_log_internal_ds(ds, "remove bookmark", tx,
943 "name=%s", shortname);
944
945 dsl_dataset_rele(ds, FTAG);
946 }
947 }
948
949 /*
950 * The bookmarks must all be in the same pool.
951 */
952 int
953 dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
954 {
955 int rv;
956 dsl_bookmark_destroy_arg_t dbda;
957 nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
958 if (pair == NULL)
959 return (0);
960
961 dbda.dbda_bmarks = bmarks;
962 dbda.dbda_errors = errors;
963 dbda.dbda_success = fnvlist_alloc();
964
965 rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
966 dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
967 ZFS_SPACE_CHECK_RESERVED);
968 fnvlist_free(dbda.dbda_success);
969 return (rv);
970 }
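
/*
 * Editorial sketch (not part of the original file): a hypothetical caller of
 * dsl_bookmark_destroy().  Only the nvpair names matter; bookmarks that no
 * longer exist are silently skipped by the check function above, and real
 * failures are reported per bookmark in 'errors'.
 */
static int __attribute__((unused))
example_destroy_bookmark(void)
{
        nvlist_t *bmarks = fnvlist_alloc();
        nvlist_t *errors = fnvlist_alloc();
        int error;

        fnvlist_add_boolean(bmarks, "tank/fs#monday");
        error = dsl_bookmark_destroy(bmarks, errors);
        fnvlist_free(bmarks);
        fnvlist_free(errors);
        return (error);
}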
971
972 /* Return B_TRUE if there are any long holds on this dataset. */
973 boolean_t
974 dsl_redaction_list_long_held(redaction_list_t *rl)
975 {
976 return (!zfs_refcount_is_zero(&rl->rl_longholds));
977 }
978
979 void
980 dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl, void *tag)
981 {
982 ASSERT(dsl_pool_config_held(dp));
983 (void) zfs_refcount_add(&rl->rl_longholds, tag);
984 }
985
986 void
987 dsl_redaction_list_long_rele(redaction_list_t *rl, void *tag)
988 {
989 (void) zfs_refcount_remove(&rl->rl_longholds, tag);
990 }
991
992 /* ARGSUSED */
993 static void
994 redaction_list_evict_sync(void *rlu)
995 {
996 redaction_list_t *rl = rlu;
997 zfs_refcount_destroy(&rl->rl_longholds);
998
999 kmem_free(rl, sizeof (redaction_list_t));
1000 }
1001
1002 void
1003 dsl_redaction_list_rele(redaction_list_t *rl, void *tag)
1004 {
1005 dmu_buf_rele(rl->rl_dbuf, tag);
1006 }
1007
1008 int
1009 dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, void *tag,
1010 redaction_list_t **rlp)
1011 {
1012 objset_t *mos = dp->dp_meta_objset;
1013 dmu_buf_t *dbuf;
1014 redaction_list_t *rl;
1015 int err;
1016
1017 ASSERT(dsl_pool_config_held(dp));
1018
1019 err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
1020 if (err != 0)
1021 return (err);
1022
1023 rl = dmu_buf_get_user(dbuf);
1024 if (rl == NULL) {
1025 redaction_list_t *winner = NULL;
1026
1027 rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
1028 rl->rl_dbuf = dbuf;
1029 rl->rl_object = rlobj;
1030 rl->rl_phys = dbuf->db_data;
1031 rl->rl_mos = dp->dp_meta_objset;
1032 zfs_refcount_create(&rl->rl_longholds);
1033 dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
1034 &rl->rl_dbuf);
1035 if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
1036 kmem_free(rl, sizeof (*rl));
1037 rl = winner;
1038 }
1039 }
1040 *rlp = rl;
1041 return (0);
1042 }
1043
1044 /*
1045 * Snapshot ds is being destroyed.
1046 *
1047 * Adjust the "freed_before_next" of any bookmarks between this snap
1048 * and the previous snapshot, because their "next snapshot" is changing.
1049 *
1050 * If there are any bookmarks with HAS_FBN at this snapshot, remove
1051  * their ZBM_FLAG_SNAPSHOT_EXISTS flag (note: there can be at most one
1052  * snapshot of each filesystem at a given txg), and return B_TRUE. In this
1053  * case the caller cannot remove the key in the deadlist at this TXG, because
1054 * the HAS_FBN bookmarks require the key be there.
1055 *
1056 * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1057 * snapshot's TXG. In this case the caller can remove the key in the
1058 * deadlist at this TXG.
1059 */
1060 boolean_t
1061 dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1062 {
1063 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1064
1065 dsl_dataset_t *head, *next;
1066 VERIFY0(dsl_dataset_hold_obj(dp,
1067 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
1068 VERIFY0(dsl_dataset_hold_obj(dp,
1069 dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
1070
1071 /*
1072 * Find the first bookmark that HAS_FBN at or after the
1073 * previous snapshot.
1074 */
1075 dsl_bookmark_node_t search = { 0 };
1076 avl_index_t idx;
1077 search.dbn_phys.zbm_creation_txg =
1078 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1079 search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1080 /*
1081 * The empty-string name can't be in the AVL, and it compares
1082 * before any entries with this TXG.
1083 */
1084 search.dbn_name = "";
1085 VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1086 dsl_bookmark_node_t *dbn =
1087 avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1088
1089 /*
1090 * Iterate over all bookmarks that are at or after the previous
1091 * snapshot, and before this (being deleted) snapshot. Adjust
1092 * their FBN based on their new next snapshot.
1093 */
1094 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
1095 dsl_dataset_phys(ds)->ds_creation_txg;
1096 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1097 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
1098 continue;
1099 /*
1100 * Increase our FBN by the amount of space that was live
1101 * (referenced) at the time of this bookmark (i.e.
1102 * birth <= zbm_creation_txg), and killed between this
1103 * (being deleted) snapshot and the next snapshot (i.e.
1104 * on the next snapshot's deadlist). (Space killed before
1105  * this is already counted in our FBN.)
1106 */
1107 uint64_t referenced, compressed, uncompressed;
1108 dsl_deadlist_space_range(&next->ds_deadlist,
1109 0, dbn->dbn_phys.zbm_creation_txg,
1110 &referenced, &compressed, &uncompressed);
1111 dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1112 referenced;
1113 dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1114 compressed;
1115 dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1116 uncompressed;
1117 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1118 dbn->dbn_name, sizeof (uint64_t),
1119 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1120 &dbn->dbn_phys, tx));
1121 }
1122 dsl_dataset_rele(next, FTAG);
1123
1124 /*
1125 * There may be several bookmarks at this txg (the TXG of the
1126 * snapshot being deleted). We need to clear the SNAPSHOT_EXISTS
1127 * flag on all of them, and return TRUE if there is at least 1
1128 * bookmark here with HAS_FBN (thus preventing the deadlist
1129 * key from being removed).
1130 */
1131 boolean_t rv = B_FALSE;
1132 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1133 dsl_dataset_phys(ds)->ds_creation_txg;
1134 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1135 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1136 ASSERT(!(dbn->dbn_phys.zbm_flags &
1137 ZBM_FLAG_SNAPSHOT_EXISTS));
1138 continue;
1139 }
1140 ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
1141 dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
1142 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1143 dbn->dbn_name, sizeof (uint64_t),
1144 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1145 &dbn->dbn_phys, tx));
1146 rv = B_TRUE;
1147 }
1148 dsl_dataset_rele(head, FTAG);
1149 return (rv);
1150 }
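
/*
 * Editorial example (not part of the original file): suppose a filesystem has
 * snapshots at txg 90 and txg 200, a HAS_FBN bookmark at txg 100, and the
 * snapshot at txg 150 is being destroyed.  Blocks born at or before txg 100
 * that sit on the txg-200 snapshot's deadlist were live at the bookmark but
 * are now freed before the bookmark's new "next" snapshot, so their sizes are
 * added to the bookmark's freed_before_next_snap counters above.
 */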
1151
1152 /*
1153 * A snapshot is being created of this (head) dataset.
1154 *
1155 * We don't keep keys in the deadlist for the most recent snapshot, or any
1156 * bookmarks at or after it, because there can't be any blocks on the
1157 * deadlist in this range. Now that the most recent snapshot is after
1158 * all bookmarks, we need to add these keys. Note that the caller always
1159 * adds a key at the previous snapshot, so we only add keys for bookmarks
1160 * after that.
1161 */
1162 void
1163 dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1164 {
1165 uint64_t last_key_added = UINT64_MAX;
1166 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1167 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
1168 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1169 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1170 uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
1171 ASSERT3U(creation_txg, <=, last_key_added);
1172 /*
1173 * Note, there may be multiple bookmarks at this TXG,
1174 * and we only want to add the key for this TXG once.
1175 * The ds_bookmarks AVL is sorted by TXG, so we will visit
1176 * these bookmarks in sequence.
1177 */
1178 if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
1179 creation_txg != last_key_added) {
1180 dsl_deadlist_add_key(&ds->ds_deadlist,
1181 creation_txg, tx);
1182 last_key_added = creation_txg;
1183 }
1184 }
1185 }
1186
1187 /*
1188 * The next snapshot of the origin dataset has changed, due to
1189 * promote or clone swap. If there are any bookmarks at this dataset,
1190 * we need to update their zbm_*_freed_before_next_snap to reflect this.
1191 * The head dataset has the relevant bookmarks in ds_bookmarks.
1192 */
1193 void
1194 dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
1195 dmu_tx_t *tx)
1196 {
1197 dsl_pool_t *dp = dmu_tx_pool(tx);
1198
1199 /*
1200 * Find the first bookmark that HAS_FBN at the origin snapshot.
1201 */
1202 dsl_bookmark_node_t search = { 0 };
1203 avl_index_t idx;
1204 search.dbn_phys.zbm_creation_txg =
1205 dsl_dataset_phys(origin)->ds_creation_txg;
1206 search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1207 /*
1208 * The empty-string name can't be in the AVL, and it compares
1209 * before any entries with this TXG.
1210 */
1211 search.dbn_name = "";
1212 VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1213 dsl_bookmark_node_t *dbn =
1214 avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1215
1216 /*
1217 * Iterate over all bookmarks that are at the origin txg.
1218 * Adjust their FBN based on their new next snapshot.
1219 */
1220 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1221 dsl_dataset_phys(origin)->ds_creation_txg &&
1222 (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1223 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1224
1225 /*
1226 * Bookmark is at the origin, therefore its
1227 * "next dataset" is changing, so we need
1228 * to reset its FBN by recomputing it in
1229 * dsl_bookmark_set_phys().
1230 */
1231 ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
1232 dsl_dataset_phys(origin)->ds_guid);
1233 ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
1234 dsl_dataset_phys(origin)->ds_referenced_bytes);
1235 ASSERT(dbn->dbn_phys.zbm_flags &
1236 ZBM_FLAG_SNAPSHOT_EXISTS);
1237 /*
1238 * Save and restore the zbm_redaction_obj, which
1239 * is zeroed by dsl_bookmark_set_phys().
1240 */
1241 uint64_t redaction_obj =
1242 dbn->dbn_phys.zbm_redaction_obj;
1243 dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
1244 dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
1245
1246 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1247 dbn->dbn_name, sizeof (uint64_t),
1248 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1249 &dbn->dbn_phys, tx));
1250 }
1251 }
1252
1253 /*
1254 * This block is no longer referenced by this (head) dataset.
1255 *
1256 * Adjust the FBN of any bookmarks that reference this block, whose "next"
1257 * is the head dataset.
1258 */
1259 /* ARGSUSED */
1260 void
1261 dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
1262 {
1263 /*
1264 * Iterate over bookmarks whose "next" is the head dataset.
1265 */
1266 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1267 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1268 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1269 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1270 /*
1271 * If the block was live (referenced) at the time of this
1272 * bookmark, add its space to the bookmark's FBN.
1273 */
1274 if (bp->blk_birth <= dbn->dbn_phys.zbm_creation_txg &&
1275 (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1276 mutex_enter(&dbn->dbn_lock);
1277 dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1278 bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
1279 dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1280 BP_GET_PSIZE(bp);
1281 dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1282 BP_GET_UCSIZE(bp);
1283 /*
1284 * Changing the ZAP object here would be too
1285 * expensive. Also, we may be called from the zio
1286 * interrupt thread, which can't block on i/o.
1287 * Therefore, we mark this bookmark as dirty and
1288 * modify the ZAP once per txg, in
1289 * dsl_bookmark_sync_done().
1290 */
1291 dbn->dbn_dirty = B_TRUE;
1292 mutex_exit(&dbn->dbn_lock);
1293 }
1294 }
1295 }
1296
1297 void
1298 dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
1299 {
1300 dsl_pool_t *dp = dmu_tx_pool(tx);
1301
1302 if (dsl_dataset_is_snapshot(ds))
1303 return;
1304
1305 /*
1306 * We only dirty bookmarks that are at or after the most recent
1307 * snapshot. We can't create snapshots between
1308 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
1309 * don't need to look at any bookmarks before ds_prev_snap_txg.
1310 */
1311 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1312 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1313 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1314 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1315 if (dbn->dbn_dirty) {
1316 /*
1317 * We only dirty nodes with HAS_FBN, therefore
1318 * we can always use the current bookmark struct size.
1319 */
1320 ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1321 VERIFY0(zap_update(dp->dp_meta_objset,
1322 ds->ds_bookmarks_obj,
1323 dbn->dbn_name, sizeof (uint64_t),
1324 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1325 &dbn->dbn_phys, tx));
1326 dbn->dbn_dirty = B_FALSE;
1327 }
1328 }
1329 #ifdef ZFS_DEBUG
1330 for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
1331 dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
1332 ASSERT(!dbn->dbn_dirty);
1333 }
1334 #endif
1335 }
1336
1337 /*
1338 * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1339 */
1340 uint64_t
1341 dsl_bookmark_latest_txg(dsl_dataset_t *ds)
1342 {
1343 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1344 dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1345 if (dbn == NULL)
1346 return (0);
1347 return (dbn->dbn_phys.zbm_creation_txg);
1348 }
1349
1350 static inline unsigned int
1351 redact_block_buf_num_entries(unsigned int size)
1352 {
1353 return (size / sizeof (redact_block_phys_t));
1354 }
1355
1356 /*
1357  * This function calculates the index of the last valid entry in a buffer of
1358  * redact_block_phys_t. If we're reading the redaction list into buffers of
1359  * size bufsize, then for all but the last buffer, the last valid entry is
1360  * simply the final slot of the buffer. The last buffer, however, may be only
1361  * partially filled. Thus, we check whether we're looking at the last buffer
1362  * in the redaction list; if so, we return the index of the final entry in
1363  * that buffer, i.e. (total entries - 1) modulo the number of entries per
1364  * buffer. Otherwise, we return the number of entries per buffer minus one.
1365 */
1366 static inline unsigned int
1367 last_entry(redaction_list_t *rl, unsigned int bufsize, uint64_t bufid)
1368 {
1369 if (bufid == (rl->rl_phys->rlp_num_entries - 1) /
1370 redact_block_buf_num_entries(bufsize)) {
1371 return ((rl->rl_phys->rlp_num_entries - 1) %
1372 redact_block_buf_num_entries(bufsize));
1373 }
1374 return (redact_block_buf_num_entries(bufsize) - 1);
1375 }
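
/*
 * Editorial example (not part of the original file), assuming a 32-byte
 * redact_block_phys_t: with bufsize = SPA_OLD_MAXBLOCKSIZE (128 KiB) each
 * buffer holds 4096 entries.  For a redaction list with rlp_num_entries =
 * 10000, buffers 0 and 1 are full (last valid index 4095), while the last
 * buffer is bufid (10000 - 1) / 4096 = 2 and its last valid index is
 * (10000 - 1) % 4096 = 1807.
 */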
1376
1377 /*
1378 * Compare the redact_block_phys_t to the bookmark. If the last block in the
1379 * redact_block_phys_t is before the bookmark, return -1. If the first block in
1380 * the redact_block_phys_t is after the bookmark, return 1. Otherwise, the
1381 * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1382 */
1383 static int
1384 redact_block_zb_compare(redact_block_phys_t *first,
1385 zbookmark_phys_t *second)
1386 {
1387 /*
1388 * If the block_phys is for a previous object, or the last block in the
1389 * block_phys is strictly before the block in the bookmark, the
1390 * block_phys is earlier.
1391 */
1392 if (first->rbp_object < second->zb_object ||
1393 (first->rbp_object == second->zb_object &&
1394 first->rbp_blkid + (redact_block_get_count(first) - 1) <
1395 second->zb_blkid)) {
1396 return (-1);
1397 }
1398
1399 /*
1400 * If the bookmark is for a previous object, or the block in the
1401 * bookmark is strictly before the first block in the block_phys, the
1402 * bookmark is earlier.
1403 */
1404 if (first->rbp_object > second->zb_object ||
1405 (first->rbp_object == second->zb_object &&
1406 first->rbp_blkid > second->zb_blkid)) {
1407 return (1);
1408 }
1409
1410 return (0);
1411 }
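
/*
 * Editorial example (not part of the original file): for a redact_block_phys_t
 * covering object 5, blkids 100 through 109 (count 10), a bookmark at
 * (object 5, blkid 99) yields 1 (the bookmark is earlier), (object 5,
 * blkid 105) yields 0 (inside the range), and (object 5, blkid 110) yields -1
 * (the block range is earlier).
 */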
1412
1413 /*
1414 * Traverse the redaction list in the provided object, and call the callback for
1415 * each entry we find. Don't call the callback for any records before resume.
1416 */
1417 int
1418 dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
1419 rl_traverse_callback_t cb, void *arg)
1420 {
1421 objset_t *mos = rl->rl_mos;
1422 redact_block_phys_t *buf;
1423 unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
1424 int err = 0;
1425
1426 if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
1427 rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
1428 /*
1429 * When we finish a send, we update the last object and offset
1430 * to UINT64_MAX. If a send fails partway through, the last
1431 * object and offset will have some other value, indicating how
1432 * far the send got. The redaction list must be complete before
1433 * it can be traversed, so return EINVAL if the last object and
1434 * blkid are not set to UINT64_MAX.
1435 */
1436 return (SET_ERROR(EINVAL));
1437 }
1438
1439 /*
1440 * Binary search for the point to resume from. The goal is to minimize
1441 * the number of disk reads we have to perform.
1442 */
1443 buf = zio_data_buf_alloc(bufsize);
1444 uint64_t maxbufid = (rl->rl_phys->rlp_num_entries - 1) /
1445 redact_block_buf_num_entries(bufsize);
1446 uint64_t minbufid = 0;
1447 while (resume != NULL && maxbufid - minbufid >= 1) {
1448 ASSERT3U(maxbufid, >, minbufid);
1449 uint64_t midbufid = minbufid + ((maxbufid - minbufid) / 2);
1450 err = dmu_read(mos, rl->rl_object, midbufid * bufsize, bufsize,
1451 buf, DMU_READ_NO_PREFETCH);
1452 if (err != 0)
1453 break;
1454
1455 int cmp0 = redact_block_zb_compare(&buf[0], resume);
1456 int cmpn = redact_block_zb_compare(
1457 &buf[last_entry(rl, bufsize, maxbufid)], resume);
1458
1459 /*
1460 * If the first block is before or equal to the resume point,
1461 * and the last one is equal or after, then the resume point is
1462 * in this buf, and we should start here.
1463 */
1464 if (cmp0 <= 0 && cmpn >= 0)
1465 break;
1466
1467 if (cmp0 > 0)
1468 maxbufid = midbufid - 1;
1469 else if (cmpn < 0)
1470 minbufid = midbufid + 1;
1471 else
1472 panic("No progress in binary search for resume point");
1473 }
1474
1475 for (uint64_t curidx = minbufid * redact_block_buf_num_entries(bufsize);
1476 err == 0 && curidx < rl->rl_phys->rlp_num_entries;
1477 curidx++) {
1478 /*
1479 * We read in the redaction list one block at a time. Once we
1480 * finish with all the entries in a given block, we read in a
1481 * new one. The predictive prefetcher will take care of any
1482 * prefetching, and this code shouldn't be the bottleneck, so we
1483 * don't need to do manual prefetching.
1484 */
1485 if (curidx % redact_block_buf_num_entries(bufsize) == 0) {
1486 err = dmu_read(mos, rl->rl_object, curidx *
1487 sizeof (*buf), bufsize, buf,
1488 DMU_READ_PREFETCH);
1489 if (err != 0)
1490 break;
1491 }
1492 redact_block_phys_t *rb = &buf[curidx %
1493 redact_block_buf_num_entries(bufsize)];
1494 /*
1495 * If resume is non-null, we should either not send the data, or
1496 * null out resume so we don't have to keep doing these
1497 * comparisons.
1498 */
1499 if (resume != NULL) {
1500 if (redact_block_zb_compare(rb, resume) < 0) {
1501 continue;
1502 } else {
1503 /*
1504 * If the place to resume is in the middle of
1505 * the range described by this
1506 * redact_block_phys, then modify the
1507 * redact_block_phys in memory so we generate
1508 * the right records.
1509 */
1510 if (resume->zb_object == rb->rbp_object &&
1511 resume->zb_blkid > rb->rbp_blkid) {
1512 uint64_t diff = resume->zb_blkid -
1513 rb->rbp_blkid;
1514 rb->rbp_blkid = resume->zb_blkid;
1515 redact_block_set_count(rb,
1516 redact_block_get_count(rb) - diff);
1517 }
1518 resume = NULL;
1519 }
1520 }
1521
1522 if (cb(rb, arg) != 0)
1523 break;
1524 }
1525
1526 zio_data_buf_free(buf, bufsize);
1527 return (err);
1528 }
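
/*
 * Editorial sketch (not part of the original file): a minimal traversal
 * callback that totals the redacted blocks in a completed redaction list.
 * Returning nonzero from the callback stops the walk, as in the loop above.
 * A hypothetical caller holds the list via dsl_redaction_list_hold_obj() and
 * passes resume == NULL to start from the beginning.
 */
static int
example_count_redacted_cb(redact_block_phys_t *rbp, void *arg)
{
        uint64_t *total = arg;

        *total += redact_block_get_count(rbp);
        return (0);
}

static uint64_t __attribute__((unused))
example_count_redacted(redaction_list_t *rl)
{
        uint64_t total = 0;

        (void) dsl_redaction_list_traverse(rl, NULL,
            example_count_redacted_cb, &total);
        return (total);
}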