/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>

typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	spa_feature_t f;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}

static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		zvol_remove_minors(dp->dp_spa, nvpair_name(pair), B_TRUE);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_SLEEP));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0, ZFS_SPACE_CHECK_NONE);

	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
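
/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might drive dsl_destroy_snapshots_nvl() for several snapshots at once and
 * inspect the per-snapshot error list on failure.  The snapshot names below
 * are hypothetical; the nvlist helpers used are the same ones used in this
 * file.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	nvpair_t *pair;
 *	int error;
 *
 *	fnvlist_add_boolean(snaps, "pool/fs@snap1");
 *	fnvlist_add_boolean(snaps, "pool/fs@snap2");
 *	error = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist);
 *	if (error != 0) {
 *		for (pair = nvlist_next_nvpair(errlist, NULL); pair != NULL;
 *		    pair = nvlist_next_nvpair(errlist, pair)) {
 *			cmn_err(CE_NOTE, "could not destroy %s: error %d",
 *			    nvpair_name(pair), fnvpair_value_int32(pair));
 *		}
 *	}
 *	fnvlist_free(errlist);
 *	fnvlist_free(snaps);
 */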

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}

void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_feature_t f;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	objset_t *os;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t obj;
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
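
/*
 * Illustrative sketch (not part of the original source): the comment above
 * says this function is a dmu_objset_find() callback, so a caller sweeping a
 * pool for leftover inconsistent datasets might look roughly like the
 * following.  The call site and the DS_FIND_CHILDREN flag shown here are
 * assumptions for illustration only.
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */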

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif