/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>

typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;


	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_large_blocks) {
		ASSERT0(zap_contains(mos, obj, DS_FIELD_LARGE_BLOCKS));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS, tx);
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}

static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		zvol_remove_minors(dp->dp_spa, nvpair_name(pair), B_TRUE);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_SLEEP));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0, ZFS_SPACE_CHECK_NONE);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
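
/*
 * Editor's illustration (hedged, not part of the original source): a minimal
 * sketch of how a caller might batch several snapshot destroys through
 * dsl_destroy_snapshots_nvl(), mirroring the single-name wrapper above.
 * The snapshot names and the destroy_two_snaps() helper are made up for
 * illustration; the nvlist helpers (fnvlist_alloc, fnvlist_add_boolean,
 * fnvlist_free) are the same ones used by dsl_destroy_snapshot() above.
 *
 *	static int
 *	destroy_two_snaps(void)
 *	{
 *		nvlist_t *snaps = fnvlist_alloc();
 *		nvlist_t *errlist = fnvlist_alloc();
 *		int error;
 *
 *		fnvlist_add_boolean(snaps, "pool/fs@snap1");
 *		fnvlist_add_boolean(snaps, "pool/fs@snap2");
 *		error = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist);
 *		// On failure nothing is destroyed and errlist maps each
 *		// offending snapshot name to its errno.
 *		fnvlist_free(errlist);
 *		fnvlist_free(snaps);
 *		return (error);
 *	}
 */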

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}

void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	objset_t *os;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	if (ds->ds_large_blocks)
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS, tx);

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t obj;
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}
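
/*
 * Editor's illustration (hedged, not part of the original source): the
 * typical caller passes a full filesystem name and simply propagates the
 * errno, e.g. EBUSY or EEXIST when snapshots, children, or long holds still
 * exist (see dsl_destroy_head_check_impl() above). The dataset name below
 * is made up for illustration.
 *
 *	int error = dsl_destroy_head("pool/fs/to_remove");
 *	if (error != 0)
 *		return (error);	// e.g. EBUSY: snapshots or holds remain
 */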

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t inconsistent = DS_IS_INCONSISTENT(dmu_objset_ds(os));
		dmu_objset_rele(os, FTAG);
		if (inconsistent)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
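
/*
 * Editor's illustration (hedged, not part of the original source): a sketch
 * of how this callback is typically driven, walking the child datasets of a
 * pool and destroying any that were left DS_FLAG_INCONSISTENT by an
 * interrupted receive or destroy. The pool name, call site, and flags here
 * are assumptions for illustration only.
 *
 *	(void) dmu_objset_find("tank", dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */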


#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif