/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>
#include <sys/zcp.h>
#include <sys/dsl_crypt.h>
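/*
 * Check whether the given snapshot can be destroyed.  A snapshot is
 * ineligible while it is long-held, still has user holds, or is a
 * branch point (referenced by more than one child snapshot/clone).
 */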
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error = 0;
	dsl_dataset_t *ds;

	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);

	/*
	 * If the snapshot does not exist, silently ignore it, and
	 * dsl_destroy_snapshot_sync() will be a no-op
	 * (it's "already destroyed").
	 */
	if (error == ENOENT)
		return (0);

	if (error == 0) {
		error = dsl_destroy_snapshot_check_impl(ds, defer);
		dsl_dataset_rele(ds, FTAG);
	}

	return (error);
}
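/*
 * Callback state used by process_old_cb() while walking the old-format
 * (pre-SPA_VERSION_DEADLISTS) deadlist of the snapshot being destroyed.
 */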
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}

	return (0);
}
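/*
 * Handle the old-format deadlist case: classify each block in next's
 * deadlist via process_old_cb(), adjust snapused accounting, and then
 * swap our deadlist object with next's.
 */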
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
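/*
 * Walk every clone descended from this snapshot's directory (breadth
 * first, via the dd_clones ZAP) and remove the destroyed snapshot's
 * deadlist key from each clone's deadlists.
 */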
struct removeclonesnode {
	list_node_t link;
	dsl_dataset_t *ds;
};

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	list_t clones;
	struct removeclonesnode *rcn;

	list_create(&clones, sizeof (struct removeclonesnode),
	    offsetof(struct removeclonesnode, link));

	rcn = kmem_zalloc(sizeof (struct removeclonesnode), KM_SLEEP);
	rcn->ds = ds;
	list_insert_head(&clones, rcn);

	for (; rcn != NULL; rcn = list_next(&clones, rcn)) {
		zap_cursor_t zc;
		zap_attribute_t za;
		/*
		 * If it is the old version, dd_clones doesn't exist so we
		 * can't find the clones, but dsl_deadlist_remove_key()
		 * is a no-op so it doesn't matter.
		 */
		if (dsl_dir_phys(rcn->ds->ds_dir)->dd_clones == 0)
			continue;

		for (zap_cursor_init(&zc, mos,
		    dsl_dir_phys(rcn->ds->ds_dir)->dd_clones);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			dsl_dataset_t *clone;

			VERIFY0(dsl_dataset_hold_obj(rcn->ds->ds_dir->dd_pool,
			    za.za_first_integer, FTAG, &clone));
			if (clone->ds_dir->dd_origin_txg > mintxg) {
				dsl_deadlist_remove_key(&clone->ds_deadlist,
				    mintxg, tx);
				if (dsl_dataset_remap_deadlist_exists(clone)) {
					dsl_deadlist_remove_key(
					    &clone->ds_remap_deadlist, mintxg,
					    tx);
				}
				rcn = kmem_zalloc(
				    sizeof (struct removeclonesnode),
				    KM_SLEEP);
				rcn->ds = clone;
				list_insert_tail(&clones, rcn);
			} else {
				dsl_dataset_rele(clone, FTAG);
			}
		}
		zap_cursor_fini(&zc);
	}

	/* The first node is our own dataset; there is no hold to release. */
	rcn = list_remove_head(&clones);
	kmem_free(rcn, sizeof (struct removeclonesnode));
	while ((rcn = list_remove_head(&clones)) != NULL) {
		dsl_dataset_rele(rcn->ds, FTAG);
		kmem_free(rcn, sizeof (struct removeclonesnode));
	}
	list_destroy(&clones);
}
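/*
 * Fold the doomed snapshot's remap deadlists into its neighbors: blocks
 * obsoleted by next move to the pool-wide obsolete bpobj, and our remap
 * deadlist is merged into next's before being destroyed.
 */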
static void
dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Move blocks to be obsoleted to pool's obsolete list. */
	if (dsl_dataset_remap_deadlist_exists(ds_next)) {
		if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
			dsl_pool_create_obsolete_bpobj(dp, tx);

		dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
		    &dp->dp_obsolete_bpobj,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
	}

	/* Merge our deadlist into next's and free it. */
	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_object =
		    dsl_dataset_get_remap_deadlist_object(ds);
		ASSERT(remap_deadlist_object != 0);

		mutex_enter(&ds_next->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds_next))
			dsl_dataset_create_remap_deadlist(ds_next, tx);
		mutex_exit(&ds_next->ds_remap_deadlist_lock);

		dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
		    remap_deadlist_object, tx);
		dsl_dataset_destroy_remap_deadlist(ds, tx);
	}
}
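/*
 * Perform (or defer) the actual destruction of a snapshot in syncing
 * context: unlink it from the prev/next snapshot chain, reassign its
 * deadlist and space accounting, and free its on-disk objects.
 */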
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (dsl_dataset_feature_is_active(ds, f))
			dsl_dataset_deactivate_feature(ds, f, tx);
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
		    FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		if (dsl_dataset_remap_deadlist_exists(hds)) {
			dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		}
		dsl_dataset_rele(hds, FTAG);
	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}
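/*
 * Sync-task "sync" function for destroying a single snapshot by name.
 */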
void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error == ENOENT)
		return;
	ASSERT0(error);
	dsl_destroy_snapshot_sync_impl(ds, defer, tx);
	zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}
/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps().  To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	if (nvlist_next_nvpair(snaps, NULL) == NULL)
		return (0);

	/*
	 * lzc_destroy_snaps() is documented to take an nvlist whose
	 * values "don't matter".  We need to convert that nvlist to
	 * one that we know can be converted to LUA.  We also don't
	 * care about any duplicate entries because the nvlist will
	 * be converted to a LUA table which should take care of this.
	 */
	nvlist_t *snaps_normalized;
	VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
	for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
		fnvlist_add_boolean_value(snaps_normalized,
		    nvpair_name(pair), B_TRUE);
	}

	nvlist_t *arg;
	VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
	fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
	fnvlist_free(snaps_normalized);
	fnvlist_add_boolean_value(arg, "defer", defer);

	nvlist_t *wrapper;
	VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
	fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
	fnvlist_free(arg);

	const char *program =
	    "arg = ...\n"
	    "snaps = arg['snaps']\n"
	    "defer = arg['defer']\n"
	    "errors = { }\n"
	    "has_errors = false\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.check.destroy{snap, defer=defer}\n"
	    "    zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
	    "    if errno == ENOENT then\n"
	    "        snaps[snap] = nil\n"
	    "    elseif errno ~= 0 then\n"
	    "        errors[snap] = errno\n"
	    "        has_errors = true\n"
	    "    end\n"
	    "end\n"
	    "if has_errors then\n"
	    "    return errors\n"
	    "end\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.sync.destroy{snap, defer=defer}\n"
	    "    assert(errno == 0)\n"
	    "end\n"
	    "return { }\n";

	nvlist_t *result = fnvlist_alloc();
	int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
	    program,
	    B_TRUE,
	    0,
	    zfs_lua_max_memlimit,
	    nvlist_next_nvpair(wrapper, NULL), result);
	if (error != 0) {
		char *errorstr = NULL;
		(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
		if (errorstr != NULL) {
			zfs_dbgmsg(errorstr);
		}
		fnvlist_free(wrapper);
		fnvlist_free(result);
		return (error);
	}
	fnvlist_free(wrapper);

	/*
	 * lzc_destroy_snaps() is documented to fill the errlist with
	 * int32 values, so we need to convert the int64 values that are
	 * returned from LUA.
	 */
	int rv = 0;
	nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
	for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
		int32_t val = (int32_t)fnvpair_value_int64(pair);
		if (rv == 0)
			rv = val;
		fnvlist_add_int32(errlist, nvpair_name(pair), val);
	}
	fnvlist_free(result);
	return (rv);
}
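/*
 * Convenience wrapper to destroy (or defer-destroy) a single snapshot by
 * building a one-entry nvlist for dsl_destroy_snapshots_nvl().
 */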
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
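/*
 * Callback for traverse_dataset(): free each block born after the
 * previous snapshot.  ZIL blocks carry no accounting and are freed
 * directly.
 */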
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
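/*
 * Pre-async-destroy path: synchronously traverse the dataset and free
 * every block it points to, all within this one txg.
 */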
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
	    TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
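/*
 * Check whether a head (non-snapshot) dataset can be destroyed: it must
 * have no snapshots of its own, no child filesystems, and no unexpected
 * long holds.  Also detects whether the origin snapshot must go with it.
 */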
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}
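/*
 * Sync-task "check" function for dsl_destroy_head().
 */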
int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
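/*
 * Free the on-disk state of an (already emptied) dsl_dir: its ZAPs,
 * delegations, crypto key, and its entry in the parent's child ZAP.
 */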
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/* Decrement the filesystem count for all parent filesystems. */
	if (dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation.  The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	if (dd->dd_crypto_obj != 0) {
		dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
		(void) spa_keystore_unload_wkey_impl(dp->dp_spa,
		    dd->dd_object);
	}

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	if (dsl_dir_phys(dd)->dd_clones != 0)
		VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_clones, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}
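/*
 * Destroy a head dataset in syncing context.  Depending on pool
 * features, blocks are either freed synchronously (old pools) or handed
 * to the async-destroy bptree for background reclamation.
 */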
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (dsl_dataset_feature_is_active(ds, f))
			dsl_dataset_deactivate_feature(ds, f, tx);
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist.  Unless it's a clone, the
	 * deadlist should be empty since the dataset has no snapshots.
	 * (If it's a clone, it's safe to ignore the deadlist contents
	 * since they are still referenced by the origin snapshot.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	if (dsl_dataset_remap_deadlist_exists(ds))
		dsl_dataset_destroy_remap_deadlist(ds, tx);

	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}
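/*
 * Sync-task "sync" function for destroying a head dataset by name.
 */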
void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}
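/*
 * First phase on pre-async-destroy pools: persistently mark the dataset
 * inconsistent so a crash mid-destroy is detected and cleaned up later.
 */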
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}
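/*
 * Open-context entry point for destroying a head dataset.  On pools
 * without async destroy the bulk of the frees happens here, in open
 * context, before the final sync task removes the dataset.
 */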
int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_DESTROY);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
		    FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, B_FALSE, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
}
/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}

	return (0);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif