git.proxmox.com Git - mirror_zfs.git / blame - module/zfs/dsl_destroy.c
Stack overflow when destroying deeply nested clones
13fe0198
MA
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
5b72a38d 23 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
95fd54a1 24 * Copyright (c) 2013 Steven Hartland. All rights reserved.
788eb90c 25 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
a0bd735a 26 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
13fe0198
MA
27 */
28
29#include <sys/zfs_context.h>
30#include <sys/dsl_userhold.h>
31#include <sys/dsl_dataset.h>
32#include <sys/dsl_synctask.h>
d99a0153 33#include <sys/dsl_destroy.h>
13fe0198
MA
34#include <sys/dmu_tx.h>
35#include <sys/dsl_pool.h>
36#include <sys/dsl_dir.h>
37#include <sys/dmu_traverse.h>
38#include <sys/dsl_scan.h>
39#include <sys/dmu_objset.h>
40#include <sys/zap.h>
41#include <sys/zfeature.h>
42#include <sys/zfs_ioctl.h>
43#include <sys/dsl_deleg.h>
fa86b5db 44#include <sys/dmu_impl.h>
a0bd735a 45#include <sys/zvol.h>
d99a0153 46#include <sys/zcp.h>
13fe0198 47
19580676 48int
13fe0198
MA
49dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
50{
0c66c32d 51 if (!ds->ds_is_snapshot)
2e528b49 52 return (SET_ERROR(EINVAL));
13fe0198
MA
53
54 if (dsl_dataset_long_held(ds))
2e528b49 55 return (SET_ERROR(EBUSY));
13fe0198
MA
56
57 /*
58 * Only allow deferred destroy on pools that support it.
59 * NOTE: deferred destroy is only supported on snapshots.
60 */
61 if (defer) {
62 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
63 SPA_VERSION_USERREFS)
2e528b49 64 return (SET_ERROR(ENOTSUP));
13fe0198
MA
65 return (0);
66 }
67
68 /*
69 * If this snapshot has an elevated user reference count,
70 * we can't destroy it yet.
71 */
72 if (ds->ds_userrefs > 0)
2e528b49 73 return (SET_ERROR(EBUSY));
13fe0198
MA
74
75 /*
76 * Can't delete a branch point.
77 */
d683ddbb 78 if (dsl_dataset_phys(ds)->ds_num_children > 1)
2e528b49 79 return (SET_ERROR(EEXIST));
13fe0198
MA
80
81 return (0);
82}
83
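/*
 * dsl_destroy_snapshot_check() and dsl_destroy_snapshot_sync() below form the
 * usual DSL sync-task pair: the check callback only validates the request and
 * must not modify on-disk state, while the sync callback performs the destroy
 * in syncing context.  They also back the zfs.check.destroy / zfs.sync.destroy
 * channel-program bindings that dsl_destroy_snapshots_nvl() drives further
 * down in this file.
 */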
d99a0153 84int
13fe0198
MA
85dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
86{
d99a0153
CW
87 dsl_destroy_snapshot_arg_t *ddsa = arg;
88 const char *dsname = ddsa->ddsa_name;
89 boolean_t defer = ddsa->ddsa_defer;
90
13fe0198 91 dsl_pool_t *dp = dmu_tx_pool(tx);
13fe0198 92 int error = 0;
d99a0153 93 dsl_dataset_t *ds;
13fe0198 94
d99a0153 95 error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
13fe0198 96
d99a0153
CW
97 /*
98 * If the snapshot does not exist, silently ignore it, and
99 * dsl_destroy_snapshot_sync() will be a no-op
100 * (it's "already destroyed").
101 */
102 if (error == ENOENT)
103 return (0);
13fe0198 104
d99a0153
CW
105 if (error == 0) {
106 error = dsl_destroy_snapshot_check_impl(ds, defer);
107 dsl_dataset_rele(ds, FTAG);
13fe0198
MA
108 }
109
d99a0153 110 return (error);
13fe0198
MA
111}
112
113struct process_old_arg {
114 dsl_dataset_t *ds;
115 dsl_dataset_t *ds_prev;
116 boolean_t after_branch_point;
117 zio_t *pio;
118 uint64_t used, comp, uncomp;
119};
120
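/*
 * Callback for process_old_deadlist() below: triage one entry from the next
 * snapshot's old-format deadlist.  A block born at or before this snapshot's
 * previous snapshot is still referenced by older snapshots, so it is moved
 * onto our deadlist (and credited to the previous snapshot's unique bytes if
 * it was born after that snapshot's own predecessor); a block born after the
 * previous snapshot was kept alive only by the snapshot being destroyed, so
 * it is freed immediately and its space accounted in poa->used/comp/uncomp.
 */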
121static int
122process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
123{
124 struct process_old_arg *poa = arg;
125 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
126
b0bc7a84
MG
127 ASSERT(!BP_IS_HOLE(bp));
128
d683ddbb 129 if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
13fe0198
MA
130 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
131 if (poa->ds_prev && !poa->after_branch_point &&
132 bp->blk_birth >
d683ddbb
JG
133 dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
134 dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
13fe0198
MA
135 bp_get_dsize_sync(dp->dp_spa, bp);
136 }
137 } else {
138 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
139 poa->comp += BP_GET_PSIZE(bp);
140 poa->uncomp += BP_GET_UCSIZE(bp);
141 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
142 }
143 return (0);
144}
145
146static void
147process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
148 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
149{
150 struct process_old_arg poa = { 0 };
151 dsl_pool_t *dp = ds->ds_dir->dd_pool;
152 objset_t *mos = dp->dp_meta_objset;
153 uint64_t deadlist_obj;
154
155 ASSERT(ds->ds_deadlist.dl_oldfmt);
156 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
157
158 poa.ds = ds;
159 poa.ds_prev = ds_prev;
160 poa.after_branch_point = after_branch_point;
161 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
162 VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
163 process_old_cb, &poa, tx));
164 VERIFY0(zio_wait(poa.pio));
d683ddbb 165 ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);
13fe0198
MA
166
167 /* change snapused */
168 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
169 -poa.used, -poa.comp, -poa.uncomp, tx);
170
171 /* swap next's deadlist to our deadlist */
172 dsl_deadlist_close(&ds->ds_deadlist);
173 dsl_deadlist_close(&ds_next->ds_deadlist);
d683ddbb
JG
174 deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
175 dsl_dataset_phys(ds)->ds_deadlist_obj =
176 dsl_dataset_phys(ds_next)->ds_deadlist_obj;
177 dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
178 dsl_deadlist_open(&ds->ds_deadlist, mos,
179 dsl_dataset_phys(ds)->ds_deadlist_obj);
13fe0198 180 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
d683ddbb 181 dsl_dataset_phys(ds_next)->ds_deadlist_obj);
13fe0198
MA
182}
183
c434d880 184struct removeclonesnode {
185 list_node_t link;
186 dsl_dataset_t *ds;
187};
188
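/*
 * Remove the given key from the deadlist of every clone descended from this
 * snapshot.  The clone hierarchy is walked iteratively with a list of
 * removeclonesnode entries rather than by recursion, so that a very deep
 * chain of nested clones cannot overflow the kernel stack (the change named
 * in the commit title above, "Stack overflow when destroying deeply nested
 * clones").
 */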
13fe0198
MA
189static void
190dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
191{
192 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
c434d880 193 list_t clones;
194 struct removeclonesnode *rcn;
13fe0198 195
c434d880 196 list_create(&clones, sizeof (struct removeclonesnode),
197 offsetof(struct removeclonesnode, link));
198
199 rcn = kmem_zalloc(sizeof (struct removeclonesnode), KM_SLEEP);
200 rcn->ds = ds;
201 list_insert_head(&clones, rcn);
202
203 for (; rcn != NULL; rcn = list_next(&clones, rcn)) {
204 zap_cursor_t zc;
205 zap_attribute_t za;
206 /*
207 * If it is the old version, dd_clones doesn't exist so we can't
208 * find the clones, but dsl_deadlist_remove_key() is a no-op so
209 * it doesn't matter.
210 */
211 if (dsl_dir_phys(rcn->ds->ds_dir)->dd_clones == 0)
212 continue;
213
214 for (zap_cursor_init(&zc, mos,
215 dsl_dir_phys(rcn->ds->ds_dir)->dd_clones);
216 zap_cursor_retrieve(&zc, &za) == 0;
217 zap_cursor_advance(&zc)) {
218 dsl_dataset_t *clone;
219
220 VERIFY0(dsl_dataset_hold_obj(rcn->ds->ds_dir->dd_pool,
221 za.za_first_integer, FTAG, &clone));
222 if (clone->ds_dir->dd_origin_txg > mintxg) {
223 dsl_deadlist_remove_key(&clone->ds_deadlist,
224 mintxg, tx);
225 if (dsl_dataset_remap_deadlist_exists(clone)) {
226 dsl_deadlist_remove_key(
227 &clone->ds_remap_deadlist, mintxg,
228 tx);
229 }
230 rcn = kmem_zalloc(
231 sizeof (struct removeclonesnode), KM_SLEEP);
232 rcn->ds = clone;
233 list_insert_tail(&clones, rcn);
234 } else {
235 dsl_dataset_rele(clone, FTAG);
a1d477c2 236 }
13fe0198 237 }
c434d880 238 zap_cursor_fini(&zc);
13fe0198 239 }
77831e17 240
c434d880 241 rcn = list_remove_head(&clones);
242 kmem_free(rcn, sizeof (struct removeclonesnode));
243 while ((rcn = list_remove_head(&clones)) != NULL) {
244 dsl_dataset_rele(rcn->ds, FTAG);
245 kmem_free(rcn, sizeof (struct removeclonesnode));
246 }
247 list_destroy(&clones);
13fe0198
MA
248}
249
a1d477c2
MA
250static void
251dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
252 dmu_tx_t *tx)
253{
254 dsl_pool_t *dp = ds->ds_dir->dd_pool;
255
256 /* Move blocks to be obsoleted to pool's obsolete list. */
257 if (dsl_dataset_remap_deadlist_exists(ds_next)) {
258 if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
259 dsl_pool_create_obsolete_bpobj(dp, tx);
260
261 dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
262 &dp->dp_obsolete_bpobj,
263 dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
264 }
265
266 /* Merge our deadlist into next's and free it. */
267 if (dsl_dataset_remap_deadlist_exists(ds)) {
268 uint64_t remap_deadlist_object =
269 dsl_dataset_get_remap_deadlist_object(ds);
270 ASSERT(remap_deadlist_object != 0);
271
272 mutex_enter(&ds_next->ds_remap_deadlist_lock);
273 if (!dsl_dataset_remap_deadlist_exists(ds_next))
274 dsl_dataset_create_remap_deadlist(ds_next, tx);
275 mutex_exit(&ds_next->ds_remap_deadlist_lock);
276
277 dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
278 remap_deadlist_object, tx);
279 dsl_dataset_destroy_remap_deadlist(ds, tx);
280 }
281}
282
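/*
 * Syncing-context implementation of snapshot destroy.  In outline: honor
 * deferred destroy when requested and the snapshot still has user holds or
 * clones; otherwise unlink the snapshot from its previous and next snapshots,
 * fold its deadlist (and any remap deadlist) into the next snapshot's while
 * fixing up unique/snapused accounting, collapse the corresponding key out of
 * every clone's deadlist, remove the snapshot from the head dataset's
 * snapshot namespace, and free its objects in the MOS.
 */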
13fe0198
MA
283void
284dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
285{
13fe0198
MA
286 int after_branch_point = FALSE;
287 dsl_pool_t *dp = ds->ds_dir->dd_pool;
288 objset_t *mos = dp->dp_meta_objset;
289 dsl_dataset_t *ds_prev = NULL;
1c27024e 290 uint64_t obj;
13fe0198
MA
291
292 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
cc9bb3e5 293 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
d683ddbb 294 ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
cc9bb3e5 295 rrw_exit(&ds->ds_bp_rwlock, FTAG);
13fe0198
MA
296 ASSERT(refcount_is_zero(&ds->ds_longholds));
297
298 if (defer &&
d683ddbb
JG
299 (ds->ds_userrefs > 0 ||
300 dsl_dataset_phys(ds)->ds_num_children > 1)) {
13fe0198
MA
301 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
302 dmu_buf_will_dirty(ds->ds_dbuf, tx);
d683ddbb 303 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
13fe0198
MA
304 spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
305 return;
306 }
307
d683ddbb 308 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
13fe0198
MA
309
310 /* We need to log before removing it from the namespace. */
311 spa_history_log_internal_ds(ds, "destroy", tx, "");
312
313 dsl_scan_ds_destroyed(ds, tx);
314
315 obj = ds->ds_object;
316
1c27024e 317 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
241b5415
MA
318 if (ds->ds_feature_inuse[f]) {
319 dsl_dataset_deactivate_feature(obj, f, tx);
320 ds->ds_feature_inuse[f] = B_FALSE;
321 }
f1512ee6 322 }
d683ddbb 323 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
13fe0198
MA
324 ASSERT3P(ds->ds_prev, ==, NULL);
325 VERIFY0(dsl_dataset_hold_obj(dp,
d683ddbb 326 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
13fe0198 327 after_branch_point =
d683ddbb 328 (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);
13fe0198
MA
329
330 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
331 if (after_branch_point &&
d683ddbb 332 dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
13fe0198 333 dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
d683ddbb 334 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
13fe0198 335 VERIFY0(zap_add_int(mos,
d683ddbb
JG
336 dsl_dataset_phys(ds_prev)->
337 ds_next_clones_obj,
338 dsl_dataset_phys(ds)->ds_next_snap_obj,
339 tx));
13fe0198
MA
340 }
341 }
342 if (!after_branch_point) {
d683ddbb
JG
343 dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
344 dsl_dataset_phys(ds)->ds_next_snap_obj;
13fe0198
MA
345 }
346 }
347
1c27024e
DB
348 dsl_dataset_t *ds_next;
349 uint64_t old_unique;
350 uint64_t used = 0, comp = 0, uncomp = 0;
351
13fe0198 352 VERIFY0(dsl_dataset_hold_obj(dp,
d683ddbb
JG
353 dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
354 ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);
13fe0198 355
d683ddbb 356 old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;
13fe0198
MA
357
358 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
d683ddbb
JG
359 dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
360 dsl_dataset_phys(ds)->ds_prev_snap_obj;
361 dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
362 dsl_dataset_phys(ds)->ds_prev_snap_txg;
363 ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
364 ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);
13fe0198
MA
365
366 if (ds_next->ds_deadlist.dl_oldfmt) {
367 process_old_deadlist(ds, ds_prev, ds_next,
368 after_branch_point, tx);
369 } else {
370 /* Adjust prev's unique space. */
371 if (ds_prev && !after_branch_point) {
372 dsl_deadlist_space_range(&ds_next->ds_deadlist,
d683ddbb
JG
373 dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
374 dsl_dataset_phys(ds)->ds_prev_snap_txg,
13fe0198 375 &used, &comp, &uncomp);
d683ddbb 376 dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
13fe0198
MA
377 }
378
379 /* Adjust snapused. */
380 dsl_deadlist_space_range(&ds_next->ds_deadlist,
d683ddbb 381 dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
13fe0198
MA
382 &used, &comp, &uncomp);
383 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
384 -used, -comp, -uncomp, tx);
385
386 /* Move blocks to be freed to pool's free list. */
387 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
d683ddbb 388 &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
13fe0198
MA
389 tx);
390 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
391 DD_USED_HEAD, used, comp, uncomp, tx);
392
393 /* Merge our deadlist into next's and free it. */
394 dsl_deadlist_merge(&ds_next->ds_deadlist,
d683ddbb 395 dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
13fe0198 396 }
a1d477c2 397
13fe0198 398 dsl_deadlist_close(&ds->ds_deadlist);
d683ddbb 399 dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
13fe0198 400 dmu_buf_will_dirty(ds->ds_dbuf, tx);
d683ddbb 401 dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
13fe0198 402
a1d477c2
MA
403 dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);
404
13fe0198
MA
405 /* Collapse range in clone heads */
406 dsl_dataset_remove_clones_key(ds,
d683ddbb 407 dsl_dataset_phys(ds)->ds_creation_txg, tx);
13fe0198 408
0c66c32d 409 if (ds_next->ds_is_snapshot) {
13fe0198
MA
410 dsl_dataset_t *ds_nextnext;
411
412 /*
413 * Update next's unique to include blocks which
414 * were previously shared by only this snapshot
415 * and it. Those blocks will be born after the
416 * prev snap and before this snap, and will have
417 * died after the next snap and before the one
418 * after that (ie. be on the snap after next's
419 * deadlist).
420 */
421 VERIFY0(dsl_dataset_hold_obj(dp,
d683ddbb
JG
422 dsl_dataset_phys(ds_next)->ds_next_snap_obj,
423 FTAG, &ds_nextnext));
13fe0198 424 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
d683ddbb
JG
425 dsl_dataset_phys(ds)->ds_prev_snap_txg,
426 dsl_dataset_phys(ds)->ds_creation_txg,
13fe0198 427 &used, &comp, &uncomp);
d683ddbb 428 dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
13fe0198
MA
429 dsl_dataset_rele(ds_nextnext, FTAG);
430 ASSERT3P(ds_next->ds_prev, ==, NULL);
431
432 /* Collapse range in this head. */
1c27024e 433 dsl_dataset_t *hds;
13fe0198 434 VERIFY0(dsl_dataset_hold_obj(dp,
d683ddbb 435 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
13fe0198 436 dsl_deadlist_remove_key(&hds->ds_deadlist,
d683ddbb 437 dsl_dataset_phys(ds)->ds_creation_txg, tx);
a1d477c2
MA
438 if (dsl_dataset_remap_deadlist_exists(hds)) {
439 dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
440 dsl_dataset_phys(ds)->ds_creation_txg, tx);
441 }
13fe0198
MA
442 dsl_dataset_rele(hds, FTAG);
443
444 } else {
445 ASSERT3P(ds_next->ds_prev, ==, ds);
446 dsl_dataset_rele(ds_next->ds_prev, ds_next);
447 ds_next->ds_prev = NULL;
448 if (ds_prev) {
449 VERIFY0(dsl_dataset_hold_obj(dp,
d683ddbb 450 dsl_dataset_phys(ds)->ds_prev_snap_obj,
13fe0198
MA
451 ds_next, &ds_next->ds_prev));
452 }
453
454 dsl_dataset_recalc_head_uniq(ds_next);
455
456 /*
457 * Reduce the amount of our unconsumed refreservation
458 * being charged to our parent by the amount of
459 * new unique data we have gained.
460 */
461 if (old_unique < ds_next->ds_reserved) {
462 int64_t mrsdelta;
463 uint64_t new_unique =
d683ddbb 464 dsl_dataset_phys(ds_next)->ds_unique_bytes;
13fe0198
MA
465
466 ASSERT(old_unique <= new_unique);
467 mrsdelta = MIN(new_unique - old_unique,
468 ds_next->ds_reserved - old_unique);
469 dsl_dir_diduse_space(ds->ds_dir,
470 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
471 }
472 }
473 dsl_dataset_rele(ds_next, FTAG);
474
475 /*
476 * This must be done after the dsl_traverse(), because it will
477 * re-open the objset.
478 */
479 if (ds->ds_objset) {
480 dmu_objset_evict(ds->ds_objset);
481 ds->ds_objset = NULL;
482 }
483
484 /* remove from snapshot namespace */
1c27024e 485 dsl_dataset_t *ds_head;
d683ddbb 486 ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
13fe0198 487 VERIFY0(dsl_dataset_hold_obj(dp,
d683ddbb 488 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
13fe0198
MA
489 VERIFY0(dsl_dataset_get_snapname(ds));
490#ifdef ZFS_DEBUG
491 {
492 uint64_t val;
a0bd735a 493 int err;
13fe0198
MA
494
495 err = dsl_dataset_snap_lookup(ds_head,
496 ds->ds_snapname, &val);
497 ASSERT0(err);
498 ASSERT3U(val, ==, obj);
499 }
500#endif
788eb90c 501 VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
13fe0198
MA
502 dsl_dataset_rele(ds_head, FTAG);
503
504 if (ds_prev != NULL)
505 dsl_dataset_rele(ds_prev, FTAG);
506
507 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
508
d683ddbb 509 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
13fe0198
MA
510 ASSERTV(uint64_t count);
511 ASSERT0(zap_count(mos,
d683ddbb
JG
512 dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
513 count == 0);
13fe0198 514 VERIFY0(dmu_object_free(mos,
d683ddbb 515 dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
13fe0198 516 }
d683ddbb
JG
517 if (dsl_dataset_phys(ds)->ds_props_obj != 0)
518 VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
519 tx));
520 if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
521 VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
522 tx));
13fe0198
MA
523 dsl_dir_rele(ds->ds_dir, ds);
524 ds->ds_dir = NULL;
fa86b5db 525 dmu_object_free_zapified(mos, obj, tx);
13fe0198
MA
526}
527
d99a0153 528void
13fe0198
MA
529dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
530{
d99a0153
CW
531 dsl_destroy_snapshot_arg_t *ddsa = arg;
532 const char *dsname = ddsa->ddsa_name;
533 boolean_t defer = ddsa->ddsa_defer;
13fe0198 534
d99a0153
CW
535 dsl_pool_t *dp = dmu_tx_pool(tx);
536 dsl_dataset_t *ds;
13fe0198 537
d99a0153
CW
538 int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
539 if (error == ENOENT)
540 return;
541 ASSERT0(error);
542 dsl_destroy_snapshot_sync_impl(ds, defer, tx);
543 zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
544 dsl_dataset_rele(ds, FTAG);
13fe0198
MA
545}
546
547/*
548 * The semantics of this function are described in the comment above
549 * lzc_destroy_snaps(). To summarize:
550 *
551 * The snapshots must all be in the same pool.
552 *
553 * Snapshots that don't exist will be silently ignored (considered to be
554 * "already deleted").
555 *
556 * On success, all snaps will be destroyed and this will return 0.
557 * On failure, no snaps will be destroyed, the errlist will be filled in,
558 * and this will return an errno.
559 */
560int
561dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
562 nvlist_t *errlist)
563{
d99a0153 564 if (nvlist_next_nvpair(snaps, NULL) == NULL)
13fe0198
MA
565 return (0);
566
d99a0153
CW
567 /*
568 * lzc_destroy_snaps() is documented to take an nvlist whose
8d103d88
SD
569 * values "don't matter". We need to convert that nvlist to
570 * one that we know can be converted to LUA. We also don't
571 * care about any duplicate entries because the nvlist will
572 * be converted to a LUA table which should take care of this.
d99a0153 573 */
8d103d88
SD
574 nvlist_t *snaps_normalized;
575 VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
d99a0153
CW
576 for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
577 pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
578 fnvlist_add_boolean_value(snaps_normalized,
579 nvpair_name(pair), B_TRUE);
580 }
8d103d88
SD
581
582 nvlist_t *arg;
583 VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
d99a0153
CW
584 fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
585 fnvlist_free(snaps_normalized);
586 fnvlist_add_boolean_value(arg, "defer", defer);
587
8d103d88
SD
588 nvlist_t *wrapper;
589 VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
d99a0153
CW
590 fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
591 fnvlist_free(arg);
592
593 const char *program =
594 "arg = ...\n"
595 "snaps = arg['snaps']\n"
596 "defer = arg['defer']\n"
597 "errors = { }\n"
598 "has_errors = false\n"
599 "for snap, v in pairs(snaps) do\n"
600 " errno = zfs.check.destroy{snap, defer=defer}\n"
601 " zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
602 " if errno == ENOENT then\n"
603 " snaps[snap] = nil\n"
604 " elseif errno ~= 0 then\n"
605 " errors[snap] = errno\n"
606 " has_errors = true\n"
607 " end\n"
608 "end\n"
609 "if has_errors then\n"
610 " return errors\n"
611 "end\n"
612 "for snap, v in pairs(snaps) do\n"
613 " errno = zfs.sync.destroy{snap, defer=defer}\n"
614 " assert(errno == 0)\n"
615 "end\n"
616 "return { }\n";
617
618 nvlist_t *result = fnvlist_alloc();
619 int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
620 program,
5b72a38d 621 B_TRUE,
d99a0153
CW
622 0,
623 zfs_lua_max_memlimit,
8d103d88 624 nvlist_next_nvpair(wrapper, NULL), result);
d99a0153
CW
625 if (error != 0) {
626 char *errorstr = NULL;
627 (void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
628 if (errorstr != NULL) {
629 zfs_dbgmsg(errorstr);
630 }
631 return (error);
632 }
633 fnvlist_free(wrapper);
13fe0198 634
d99a0153
CW
635 /*
636 * lzc_destroy_snaps() is documented to fill the errlist with
637 * int32 values, so we need to convert the int64 values that are
638 * returned from LUA.
639 */
640 int rv = 0;
641 nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
642 for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
643 pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
644 int32_t val = (int32_t)fnvpair_value_int64(pair);
645 if (rv == 0)
646 rv = val;
647 fnvlist_add_int32(errlist, nvpair_name(pair), val);
648 }
649 fnvlist_free(result);
650 return (rv);
13fe0198
MA
651}
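/*
 * Illustrative userland sketch (not part of this kernel file): the usual
 * consumer of the path above is libzfs_core's lzc_destroy_snaps(), which
 * hands the snapshot nvlist to the kernel via the destroy-snapshots ioctl.
 * The pool and snapshot names below are made up for the example.
 */
#if 0	/* example only -- this would live in a userland program, not here */
#include <libnvpair.h>
#include <libzfs_core.h>

static int
destroy_example_snaps(void)
{
	nvlist_t *snaps, *errlist = NULL;
	int err;

	if ((err = libzfs_core_init()) != 0)
		return (err);

	snaps = fnvlist_alloc();
	/* Only the names matter; the values are ignored. */
	fnvlist_add_boolean(snaps, "tank/fs@snap1");
	fnvlist_add_boolean(snaps, "tank/fs@snap2");

	/* Passing B_TRUE instead would request deferred destroy (-d). */
	err = lzc_destroy_snaps(snaps, B_FALSE, &errlist);

	fnvlist_free(snaps);
	if (errlist != NULL)
		fnvlist_free(errlist);
	libzfs_core_fini();
	return (err);
}
#endif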
652
653int
654dsl_destroy_snapshot(const char *name, boolean_t defer)
655{
656 int error;
79c76d5b
BB
657 nvlist_t *nvl = fnvlist_alloc();
658 nvlist_t *errlist = fnvlist_alloc();
13fe0198
MA
659
660 fnvlist_add_boolean(nvl, name);
661 error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
662 fnvlist_free(errlist);
663 fnvlist_free(nvl);
664 return (error);
665}
666
667struct killarg {
668 dsl_dataset_t *ds;
669 dmu_tx_t *tx;
670};
671
672/* ARGSUSED */
673static int
674kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
5dbd68a3 675 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
13fe0198
MA
676{
677 struct killarg *ka = arg;
678 dmu_tx_t *tx = ka->tx;
679
fcff0f35 680 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
13fe0198
MA
681 return (0);
682
683 if (zb->zb_level == ZB_ZIL_LEVEL) {
684 ASSERT(zilog != NULL);
685 /*
686 * It's a block in the intent log. It has no
687 * accounting, so just free it.
688 */
689 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
690 } else {
691 ASSERT(zilog == NULL);
d683ddbb
JG
692 ASSERT3U(bp->blk_birth, >,
693 dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
13fe0198
MA
694 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
695 }
696
697 return (0);
698}
699
700static void
701old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
702{
703 struct killarg ka;
704
705 /*
706 * Free everything that we point to (that's born after
707 * the previous snapshot, if we are a clone)
708 *
709 * NB: this should be very quick, because we already
710 * freed all the objects in open context.
711 */
712 ka.ds = ds;
713 ka.tx = tx;
714 VERIFY0(traverse_dataset(ds,
b5256303
TC
715 dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
716 TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
d683ddbb
JG
717 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
718 dsl_dataset_phys(ds)->ds_unique_bytes == 0);
13fe0198
MA
719}
720
13fe0198
MA
721int
722dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
723{
724 int error;
725 uint64_t count;
726 objset_t *mos;
727
0c66c32d
JG
728 ASSERT(!ds->ds_is_snapshot);
729 if (ds->ds_is_snapshot)
2e528b49 730 return (SET_ERROR(EINVAL));
13fe0198
MA
731
732 if (refcount_count(&ds->ds_longholds) != expected_holds)
2e528b49 733 return (SET_ERROR(EBUSY));
13fe0198
MA
734
735 mos = ds->ds_dir->dd_pool->dp_meta_objset;
736
737 /*
738 * Can't delete a head dataset if there are snapshots of it.
739 * (Except if the only snapshots are from the branch we cloned
740 * from.)
741 */
742 if (ds->ds_prev != NULL &&
d683ddbb 743 dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
2e528b49 744 return (SET_ERROR(EBUSY));
13fe0198
MA
745
746 /*
747 * Can't delete if there are children of this fs.
748 */
749 error = zap_count(mos,
d683ddbb 750 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
13fe0198
MA
751 if (error != 0)
752 return (error);
753 if (count != 0)
2e528b49 754 return (SET_ERROR(EEXIST));
13fe0198
MA
755
756 if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
d683ddbb 757 dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
13fe0198
MA
758 ds->ds_prev->ds_userrefs == 0) {
759 /* We need to remove the origin snapshot as well. */
760 if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
2e528b49 761 return (SET_ERROR(EBUSY));
13fe0198
MA
762 }
763 return (0);
764}
765
d99a0153 766int
13fe0198
MA
767dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
768{
769 dsl_destroy_head_arg_t *ddha = arg;
770 dsl_pool_t *dp = dmu_tx_pool(tx);
771 dsl_dataset_t *ds;
772 int error;
773
774 error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
775 if (error != 0)
776 return (error);
777
778 error = dsl_destroy_head_check_impl(ds, 0);
779 dsl_dataset_rele(ds, FTAG);
780 return (error);
781}
782
783static void
784dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
785{
786 dsl_dir_t *dd;
787 dsl_pool_t *dp = dmu_tx_pool(tx);
788 objset_t *mos = dp->dp_meta_objset;
789 dd_used_t t;
790
791 ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));
792
793 VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
794
d683ddbb 795 ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
13fe0198 796
788eb90c
JJ
797 /*
798 * Decrement the filesystem count for all parent filesystems.
799 *
800 * When we receive an incremental stream into a filesystem that already
801 * exists, a temporary clone is created. We never count this temporary
802 * clone, whose name begins with a '%'.
803 */
804 if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
805 dsl_fs_ss_count_adjust(dd->dd_parent, -1,
806 DD_FIELD_FILESYSTEM_COUNT, tx);
807
13fe0198
MA
808 /*
809 * Remove our reservation. The impl() routine avoids setting the
810 * actual property, which would require the (already destroyed) ds.
811 */
812 dsl_dir_set_reservation_sync_impl(dd, 0, tx);
813
d683ddbb
JG
814 ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
815 ASSERT0(dsl_dir_phys(dd)->dd_reserved);
13fe0198 816 for (t = 0; t < DD_USED_NUM; t++)
d683ddbb 817 ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);
13fe0198 818
b5256303
TC
819 if (dd->dd_crypto_obj != 0) {
820 dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
821 (void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
822 }
823
d683ddbb
JG
824 VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
825 VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
826 VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
13fe0198 827 VERIFY0(zap_remove(mos,
d683ddbb
JG
828 dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
829 dd->dd_myname, tx));
13fe0198
MA
830
831 dsl_dir_rele(dd, FTAG);
fa86b5db 832 dmu_object_free_zapified(mos, ddobj, tx);
13fe0198
MA
833}
834
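/*
 * Syncing-context implementation of head (filesystem/volume) destroy.  In
 * outline: drop any refreservation, deactivate per-dataset features, unlink
 * from the origin snapshot if this is a clone, then either free the dataset's
 * blocks synchronously (pools without async_destroy) or hand the objset's
 * bptree to the pool-wide async destroy list, erase the dataset from its
 * dsl_dir, destroy the now-empty dsl_dir, and finally destroy a
 * defer-destroyed origin snapshot that was only kept alive by this clone.
 */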
835void
836dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
837{
838 dsl_pool_t *dp = dmu_tx_pool(tx);
839 objset_t *mos = dp->dp_meta_objset;
840 uint64_t obj, ddobj, prevobj = 0;
841 boolean_t rmorigin;
13fe0198 842
d683ddbb 843 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
13fe0198 844 ASSERT(ds->ds_prev == NULL ||
d683ddbb 845 dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
cc9bb3e5 846 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
d683ddbb 847 ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
cc9bb3e5 848 rrw_exit(&ds->ds_bp_rwlock, FTAG);
13fe0198
MA
849 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
850
851 /* We need to log before removing it from the namespace. */
852 spa_history_log_internal_ds(ds, "destroy", tx, "");
853
854 rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
855 DS_IS_DEFER_DESTROY(ds->ds_prev) &&
d683ddbb 856 dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
13fe0198
MA
857 ds->ds_prev->ds_userrefs == 0);
858
9b67f605 859 /* Remove our reservation. */
13fe0198
MA
860 if (ds->ds_reserved != 0) {
861 dsl_dataset_set_refreservation_sync_impl(ds,
862 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
863 0, tx);
864 ASSERT0(ds->ds_reserved);
865 }
866
241b5415 867 obj = ds->ds_object;
f1512ee6 868
1c27024e 869 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
241b5415
MA
870 if (ds->ds_feature_inuse[f]) {
871 dsl_dataset_deactivate_feature(obj, f, tx);
872 ds->ds_feature_inuse[f] = B_FALSE;
873 }
874 }
13fe0198 875
241b5415 876 dsl_scan_ds_destroyed(ds, tx);
13fe0198 877
d683ddbb 878 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
13fe0198
MA
879 /* This is a clone */
880 ASSERT(ds->ds_prev != NULL);
d683ddbb
JG
881 ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
882 obj);
883 ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
13fe0198
MA
884
885 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
d683ddbb 886 if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
13fe0198
MA
887 dsl_dataset_remove_from_next_clones(ds->ds_prev,
888 obj, tx);
889 }
890
d683ddbb
JG
891 ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
892 dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
13fe0198
MA
893 }
894
13fe0198
MA
895 /*
896 * Destroy the deadlist. Unless it's a clone, the
a1d477c2
MA
897 * deadlist should be empty since the dataset has no snapshots.
898 * (If it's a clone, it's safe to ignore the deadlist contents
899 * since they are still referenced by the origin snapshot.)
13fe0198
MA
900 */
901 dsl_deadlist_close(&ds->ds_deadlist);
d683ddbb 902 dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
13fe0198 903 dmu_buf_will_dirty(ds->ds_dbuf, tx);
d683ddbb 904 dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
13fe0198 905
a1d477c2
MA
906 if (dsl_dataset_remap_deadlist_exists(ds))
907 dsl_dataset_destroy_remap_deadlist(ds, tx);
908
1c27024e 909 objset_t *os;
13fe0198
MA
910 VERIFY0(dmu_objset_from_ds(ds, &os));
911
fa86b5db 912 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
13fe0198
MA
913 old_synchronous_dataset_destroy(ds, tx);
914 } else {
915 /*
916 * Move the bptree into the pool's list of trees to
917 * clean up and update space accounting information.
918 */
919 uint64_t used, comp, uncomp;
920
921 zil_destroy_sync(dmu_objset_zil(os), tx);
922
fa86b5db
MA
923 if (!spa_feature_is_active(dp->dp_spa,
924 SPA_FEATURE_ASYNC_DESTROY)) {
2696dfaf 925 dsl_scan_t *scn = dp->dp_scan;
fa86b5db
MA
926 spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
927 tx);
13fe0198
MA
928 dp->dp_bptree_obj = bptree_alloc(mos, tx);
929 VERIFY0(zap_add(mos,
930 DMU_POOL_DIRECTORY_OBJECT,
931 DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
932 &dp->dp_bptree_obj, tx));
2696dfaf
GW
933 ASSERT(!scn->scn_async_destroying);
934 scn->scn_async_destroying = B_TRUE;
13fe0198
MA
935 }
936
d683ddbb
JG
937 used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
938 comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
939 uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;
13fe0198
MA
940
941 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
d683ddbb 942 dsl_dataset_phys(ds)->ds_unique_bytes == used);
13fe0198 943
cc9bb3e5 944 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
13fe0198 945 bptree_add(mos, dp->dp_bptree_obj,
d683ddbb
JG
946 &dsl_dataset_phys(ds)->ds_bp,
947 dsl_dataset_phys(ds)->ds_prev_snap_txg,
13fe0198 948 used, comp, uncomp, tx);
cc9bb3e5 949 rrw_exit(&ds->ds_bp_rwlock, FTAG);
13fe0198
MA
950 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
951 -used, -comp, -uncomp, tx);
952 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
953 used, comp, uncomp, tx);
954 }
955
956 if (ds->ds_prev != NULL) {
957 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
958 VERIFY0(zap_remove_int(mos,
d683ddbb 959 dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
13fe0198
MA
960 ds->ds_object, tx));
961 }
962 prevobj = ds->ds_prev->ds_object;
963 dsl_dataset_rele(ds->ds_prev, ds);
964 ds->ds_prev = NULL;
965 }
966
967 /*
968 * This must be done after the dsl_traverse(), because it will
969 * re-open the objset.
970 */
971 if (ds->ds_objset) {
972 dmu_objset_evict(ds->ds_objset);
973 ds->ds_objset = NULL;
974 }
975
976 /* Erase the link in the dir */
977 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
d683ddbb 978 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
13fe0198 979 ddobj = ds->ds_dir->dd_object;
d683ddbb
JG
980 ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
981 VERIFY0(zap_destroy(mos,
982 dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));
13fe0198 983
da536844 984 if (ds->ds_bookmarks != 0) {
d683ddbb 985 VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
da536844
MA
986 spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
987 }
988
13fe0198
MA
989 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
990
d683ddbb
JG
991 ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
992 ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
993 ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
13fe0198
MA
994 dsl_dir_rele(ds->ds_dir, ds);
995 ds->ds_dir = NULL;
fa86b5db 996 dmu_object_free_zapified(mos, obj, tx);
13fe0198
MA
997
998 dsl_dir_destroy_sync(ddobj, tx);
999
1000 if (rmorigin) {
1001 dsl_dataset_t *prev;
1002 VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
1003 dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
1004 dsl_dataset_rele(prev, FTAG);
1005 }
1006}
1007
d99a0153 1008void
13fe0198
MA
1009dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
1010{
1011 dsl_destroy_head_arg_t *ddha = arg;
1012 dsl_pool_t *dp = dmu_tx_pool(tx);
1013 dsl_dataset_t *ds;
1014
1015 VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
1016 dsl_destroy_head_sync_impl(ds, tx);
a0bd735a 1017 zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
13fe0198
MA
1018 dsl_dataset_rele(ds, FTAG);
1019}
1020
1021static void
1022dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
1023{
1024 dsl_destroy_head_arg_t *ddha = arg;
1025 dsl_pool_t *dp = dmu_tx_pool(tx);
1026 dsl_dataset_t *ds;
1027
1028 VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
1029
1030 /* Mark it as inconsistent on-disk, in case we crash */
1031 dmu_buf_will_dirty(ds->ds_dbuf, tx);
d683ddbb 1032 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
13fe0198
MA
1033
1034 spa_history_log_internal_ds(ds, "destroy begin", tx, "");
1035 dsl_dataset_rele(ds, FTAG);
1036}
1037
1038int
1039dsl_destroy_head(const char *name)
1040{
1041 dsl_destroy_head_arg_t ddha;
1042 int error;
1043 spa_t *spa;
1044 boolean_t isenabled;
1045
1046#ifdef _KERNEL
1047 zfs_destroy_unmount_origin(name);
1048#endif
1049
1050 error = spa_open(name, &spa, FTAG);
1051 if (error != 0)
1052 return (error);
fa86b5db 1053 isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
13fe0198
MA
1054 spa_close(spa, FTAG);
1055
1056 ddha.ddha_name = name;
1057
1058 if (!isenabled) {
1059 objset_t *os;
1060
1061 error = dsl_sync_task(name, dsl_destroy_head_check,
3d45fdd6 1062 dsl_destroy_head_begin_sync, &ddha,
d2734cce 1063 0, ZFS_SPACE_CHECK_DESTROY);
13fe0198
MA
1064 if (error != 0)
1065 return (error);
1066
1067 /*
1068 * Head deletion is processed in one txg on old pools;
1069 * remove the objects from open context so that the txg sync
1070 * is not too long.
1071 */
b5256303
TC
1072 error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
1073 FTAG, &os);
13fe0198 1074 if (error == 0) {
13fe0198 1075 uint64_t prev_snap_txg =
d683ddbb
JG
1076 dsl_dataset_phys(dmu_objset_ds(os))->
1077 ds_prev_snap_txg;
1c27024e 1078 for (uint64_t obj = 0; error == 0;
13fe0198
MA
1079 error = dmu_object_next(os, &obj, FALSE,
1080 prev_snap_txg))
b663a23d 1081 (void) dmu_free_long_object(os, obj);
13fe0198
MA
1082 /* sync out all frees */
1083 txg_wait_synced(dmu_objset_pool(os), 0);
b5256303 1084 dmu_objset_disown(os, B_FALSE, FTAG);
13fe0198
MA
1085 }
1086 }
1087
1088 return (dsl_sync_task(name, dsl_destroy_head_check,
d2734cce 1089 dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
13fe0198
MA
1090}
1091
1092/*
1093 * Note, this function is used as the callback for dmu_objset_find(). We
1094 * always return 0 so that we will continue to find and process
1095 * inconsistent datasets, even if we encounter an error trying to
1096 * process one of them.
1097 */
1098/* ARGSUSED */
1099int
1100dsl_destroy_inconsistent(const char *dsname, void *arg)
1101{
1102 objset_t *os;
1103
1104 if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
47dfff3b
MA
1105 boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));
1106
1107 /*
1108 * If the dataset is inconsistent because a resumable receive
1109 * has failed, then do not destroy it.
1110 */
1111 if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
1112 need_destroy = B_FALSE;
1113
13fe0198 1114 dmu_objset_rele(os, FTAG);
47dfff3b 1115 if (need_destroy)
13fe0198
MA
1116 (void) dsl_destroy_head(dsname);
1117 }
1118 return (0);
1119}
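/*
 * Illustrative sketch (hedged): the typical caller is the pool load/import
 * path, which walks every dataset and lets this callback clean up anything
 * left DS_FLAG_INCONSISTENT by an interrupted receive or destroy, roughly:
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */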
1120
1121
93ce2b4c 1122#if defined(_KERNEL)
13fe0198
MA
1123EXPORT_SYMBOL(dsl_destroy_head);
1124EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
1125EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
1126EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
1127EXPORT_SYMBOL(dsl_destroy_inconsistent);
1128EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
1129EXPORT_SYMBOL(dsl_destroy_head_check_impl);
1130#endif