/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>
#include <sys/dsl_crypt.h>
typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;
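
/*
 * Common checks for destroying a snapshot: the dataset must actually be a
 * snapshot, must not be long-held, and (unless the destroy is deferred)
 * must have no user holds and must not be a branch point.
 */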
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
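
/*
 * Check phase of the snapshot-destroy sync task.  Walks the requested
 * snapshots, records the ones that can be destroyed in
 * dsda_successful_snaps, and records per-snapshot errors in dsda_errlist.
 */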
static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}
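
/*
 * Callback state and bpobj callback used when the next snapshot still has
 * an old-format deadlist: blocks born no later than our previous snapshot
 * are re-inserted into our deadlist (possibly crediting prev's unique
 * bytes), while everything else is freed immediately.
 */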
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
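
/*
 * Handle the old-format deadlist case: walk next's deadlist with
 * process_old_cb(), adjust the snapused accounting, and then swap our
 * deadlist object with next's.
 */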
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
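
/*
 * Recursively remove the given birth-time key from the deadlists of all
 * clones that branched off after mintxg.
 */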
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}
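
/*
 * Sync-context work of destroying one snapshot.  If "defer" is set and the
 * snapshot still has user holds or clones, it is only marked
 * DS_FLAG_DEFER_DESTROY; otherwise it is unlinked from the snapshot list,
 * its deadlist is merged into the next snapshot's (or handled via the
 * old-format path), space accounting is adjusted, and the dataset object
 * is freed.
 */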
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);
	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}
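
/*
 * Sync phase of the snapshot-destroy task: destroy every snapshot that the
 * check phase recorded in dsda_successful_snaps and remove any associated
 * zvol minors.
 */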
static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		zvol_remove_minors(dp->dp_spa, nvpair_name(pair), B_TRUE);
		dsl_dataset_rele(ds, FTAG);
	}
}
/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps().  To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_SLEEP));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0, ZFS_SPACE_CHECK_NONE);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}
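
/*
 * Convenience wrapper: destroy a single snapshot by name by building a
 * one-entry nvlist and calling dsl_destroy_snapshots_nvl().
 */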
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
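
/*
 * kill_blkptr() is the traverse_dataset() callback used by the synchronous
 * (pre-async_destroy) destroy path; it frees intent-log blocks directly and
 * kills everything else against the dataset being destroyed.
 */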
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
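
/*
 * Synchronous destroy used when the async_destroy feature is not enabled:
 * free, in this txg, every block born after the previous snapshot.
 */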
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
	    TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;
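
/*
 * Common checks for destroying a head (filesystem or volume) dataset: it
 * must not be a snapshot, must not be long-held beyond expected_holds, and
 * must have neither snapshots of its own nor child datasets.  If destroying
 * it would also trigger destruction of a defer-destroyed origin snapshot,
 * that origin must not be long-held either.
 */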
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}
static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
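
/*
 * Free the dsl_dir_t once its head dataset is gone: fix up the filesystem
 * counts, drop the reservation, tear down any crypto key, destroy the dir's
 * ZAP objects and delegation state, and unlink it from its parent.
 */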
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created.  We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	if (dd->dd_crypto_obj != 0) {
		dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
		(void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
	}

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}
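
/*
 * Sync-context work of destroying a head dataset.  On pools with
 * async_destroy the dataset's blocks are handed to the pool-wide bptree for
 * background freeing; otherwise they are freed synchronously via
 * old_synchronous_dataset_destroy().  Afterwards the snapshot namespace,
 * bookmarks, and the containing dsl_dir are torn down, and a defer-destroyed
 * origin snapshot is destroyed as well if this was its last dependent clone.
 */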
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	objset_t *os;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist.  Unless it's a clone, the
	 * deadlist should be empty.  (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}
static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}
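
/*
 * Open-context entry point for destroying a head dataset.  On pools without
 * the async_destroy feature the dataset is first marked inconsistent in one
 * sync task, its objects are then freed from open context, and a final sync
 * task removes it; with async_destroy enabled only the final sync task is
 * needed.
 */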
int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
		    FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, B_FALSE, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}
/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}

	return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif