/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>

typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;
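
/*
 * The entry points below follow the dsl_sync_task() pattern: a *_check
 * function validates the request (it runs once in open context and again
 * in syncing context, which is why dsl_destroy_snapshot_check() returns
 * early when the tx is not yet syncing), and a *_sync function applies
 * the change in syncing context, where on-disk state may be modified.
 */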

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

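/*
 * Deadlist-walk callback: blocks born at or before ds's previous snapshot
 * are still referenced by older snapshots, so they are moved onto ds's own
 * deadlist (and credited to ds_prev's unique space when appropriate);
 * blocks born after that point existed only between ds and ds_next, so
 * they are freed here and their space is tallied for the snapused
 * adjustment in process_old_deadlist().
 */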
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}

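/*
 * Remove the deadlist entry keyed at mintxg from every clone of ds,
 * recursing through clones of those clones, so the whole clone family
 * collapses the dying snapshot's txg out of its deadlists.
 */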
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	spa_feature_t f;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}

static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		zvol_remove_minors(dp->dp_spa, nvpair_name(pair), B_TRUE);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
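/*
 * For illustration, a caller builds the request like this (this is
 * exactly what dsl_destroy_snapshot() below does for a single name):
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "tank/fs@snap");
 *	error = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist);
 */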
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_SLEEP));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0, ZFS_SPACE_CHECK_NONE);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

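/*
 * traverse_dataset() callback used by old_synchronous_dataset_destroy()
 * below. Intent-log blocks carry no dataset accounting, so they are
 * simply freed; all other blocks go through dsl_dataset_block_kill(),
 * which updates the dataset's space accounting as it frees them.
 */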
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

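	/*
	 * A clone can take its origin snapshot down with it: if the origin
	 * was marked for deferred destroy, ds_num_children == 2 means it is
	 * referenced only by its own next snapshot (or head) and by this
	 * clone, so once this clone is gone the origin becomes destroyable,
	 * provided no user refs or long holds remain.
	 */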
	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}

void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_feature_t f;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	objset_t *os;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
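
		/*
		 * Nothing is freed here in the async case: dsl_scan_sync()
		 * iterates dp_bptree_obj in later txgs and frees the blocks
		 * in the background, which is why the space was moved to
		 * dp_free_dir above instead of being reclaimed immediately.
		 */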
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t obj;
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
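
/*
 * An illustrative invocation (the exact call site may differ): walk every
 * dataset under a root name and clean up any left inconsistent by an
 * interrupted receive or destroy:
 *
 *	(void) dmu_objset_find("tank", dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */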

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif