/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>

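/*
 * Argument block for the destroy-snapshots sync task: the snapshots
 * requested, the subset that passed the check phase, the deferred-destroy
 * flag, and a per-snapshot errno list for failures.
 */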
typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;

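/*
 * Check whether the given snapshot can be destroyed.  Returns 0 or an
 * errno: EINVAL if ds is not a snapshot, EBUSY if it is long-held, or
 * ENOTSUP if a deferred destroy is requested on a pool that predates
 * SPA_VERSION_USERREFS.  For an immediate (non-deferred) destroy, also
 * EBUSY if the snapshot has user holds and EEXIST if it is a branch
 * point (has more than one child).
 */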
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (ds->ds_phys->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

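/*
 * Sync-task check function.  In syncing context, validate each requested
 * snapshot with dsl_destroy_snapshot_check_impl(), recording passing names
 * in dsda_successful_snaps and failures in dsda_errlist.  Returns the
 * first recorded error, if any, so that a failed check aborts the sync
 * task before anything is destroyed.
 */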
static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}

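/*
 * Callback state and callback for walking ds_next's old-format deadlist.
 * Blocks born before our previous snapshot are still referenced, so they
 * are re-inserted into our deadlist (crediting ds_prev's unique bytes
 * where appropriate); blocks born after it were unique to this snapshot,
 * so they are freed and their space tallied in used/comp/uncomp.
 */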
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

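/*
 * Old-format deadlist handling: walk ds_next's deadlist with
 * process_old_cb(), which moves still-referenced blocks into our deadlist
 * and frees the rest, charge the freed space against DD_USED_SNAP, then
 * swap the two deadlist objects so that ds_next ends up with the merged
 * list (the caller frees the leftover one).
 */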
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	ds->ds_phys->ds_deadlist_obj = ds_next->ds_phys->ds_deadlist_obj;
	ds_next->ds_phys->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}

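/*
 * Remove the deadlist key at mintxg from every clone that branched off
 * after mintxg, recursing through dd_clones so that clones of clones are
 * collapsed as well.
 */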
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_PUSHPAGE);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);

	for (zap_cursor_init(zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

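/*
 * Destroy one snapshot in syncing context.  If the destroy is deferred
 * and the snapshot still has user holds or clones, just set
 * DS_FLAG_DEFER_DESTROY and return.  Otherwise unlink it from the
 * snapshot chain, fix up the neighboring snapshots' deadlists and unique
 * space, move its dead blocks to the pool's free list, remove it from the
 * namespace, and free its on-disk object.
 */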
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
#ifdef ZFS_DEBUG
	int err;
#endif
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	VERIFY0(dsl_dataset_hold_obj(dp,
	    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

	old_unique = ds_next->ds_phys->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	ds_next->ds_phys->ds_prev_snap_obj =
	    ds->ds_phys->ds_prev_snap_obj;
	ds_next->ds_phys->ds_prev_snap_txg =
	    ds->ds_phys->ds_prev_snap_txg;
	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
	    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds_prev->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			ds_prev->ds_phys->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    ds->ds_phys->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    ds->ds_phys->ds_creation_txg, tx);

	if (dsl_dataset_is_snapshot(ds_next)) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds_next->ds_phys->ds_next_snap_obj, FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    ds->ds_phys->ds_prev_snap_txg,
		    ds->ds_phys->ds_creation_txg,
		    &used, &comp, &uncomp);
		ds_next->ds_phys->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    ds->ds_phys->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    ds_next->ds_phys->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the traversal (traverse_dataset()),
	 * because traversal re-opens the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count));
		ASSERT0(count);
		VERIFY0(dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}

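/*
 * Sync-task sync function: destroy, in order, every snapshot that passed
 * the check phase.
 */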
static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_PUSHPAGE));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}

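/*
 * Convenience wrapper: destroy a single snapshot by name via
 * dsl_destroy_snapshots_nvl(), discarding the per-snapshot error list.
 */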
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl;
	nvlist_t *errlist;

	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));
	VERIFY0(nvlist_alloc(&errlist, NV_UNIQUE_NAME, KM_PUSHPAGE));

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

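/*
 * traverse_dataset() callback used by the old synchronous destroy path:
 * ZIL blocks carry no accounting and are freed directly; all other blocks
 * are released through dsl_dataset_block_kill() so space accounting stays
 * consistent.
 */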
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

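/*
 * Synchronously free every block the dataset references (everything born
 * after the previous snapshot, if it is a clone).  Used only on pools
 * where SPA_FEATURE_ASYNC_DESTROY is not enabled.
 */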
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
}

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

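/*
 * Check whether a head dataset (filesystem or volume) can be destroyed.
 * Returns EINVAL for a snapshot, EBUSY if it carries unexpected long
 * holds or still has snapshots of its own, and EEXIST if it has child
 * filesystems.  If destroying the head would also destroy a
 * deferred-destroy origin snapshot, that snapshot must not be long-held
 * either.
 */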
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	if (dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    ds->ds_prev->ds_phys->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

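/*
 * Free the (now empty) dsl_dir: drop its reservation, verify all space
 * accounting has reached zero, destroy its child, property, and
 * delegation ZAPs, unlink it from its parent, and free the on-disk
 * object.
 */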
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dd->dd_phys->dd_head_dataset_obj);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dd->dd_phys->dd_used_bytes);
	ASSERT0(dd->dd_phys->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}

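/*
 * Destroy a head dataset in syncing context.  Frees (or queues for async
 * freeing) all of the dataset's blocks, detaches it from its origin,
 * removes it from the namespace, and finally destroys its dsl_dir.  If
 * the origin was a deferred-destroy snapshot kept alive only by this
 * clone, it is destroyed as well.
 */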
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	objset_t *os;

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    ds->ds_prev->ds_phys->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(ds->ds_prev->ds_phys->ds_next_snap_obj, !=, obj);
		ASSERT0(ds->ds_phys->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (ds->ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(ds->ds_prev->ds_phys->ds_num_children, >, 1);
		ds->ds_prev->ds_phys->ds_num_children--;
	}

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = ds->ds_dir->dd_phys->dd_used_bytes;
		comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
		uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    ds->ds_prev->ds_dir->dd_phys->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the traversal (traverse_dataset()),
	 * because traversal re-opens the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx));

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(ds->ds_phys->ds_next_clones_obj);
	ASSERT0(ds->ds_phys->ds_props_obj);
	ASSERT0(ds->ds_phys->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}

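/*
 * First phase of a two-phase head destroy on pools without async destroy:
 * mark the dataset inconsistent on disk so that a crash mid-destroy
 * leaves it eligible for cleanup by dsl_destroy_inconsistent().
 */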
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

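/*
 * Destroy a head dataset by name.  On pools with
 * SPA_FEATURE_ASYNC_DESTROY this is a single sync task and the bulk of
 * the freeing proceeds in the background.  On older pools the dataset is
 * first marked inconsistent and its objects are freed from open context,
 * so that the final sync task does not stall the txg for too long.
 */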
int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha, 0);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t obj;
			uint64_t prev_snap_txg =
			    dmu_objset_ds(os)->ds_phys->ds_prev_snap_txg;
			for (obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0));
}

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t inconsistent = DS_IS_INCONSISTENT(dmu_objset_ds(os));
		dmu_objset_rele(os, FTAG);
		if (inconsistent)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
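
/*
 * Illustrative usage (a sketch, not part of this file): a caller sweeping
 * a pool for datasets left inconsistent by an interrupted destroy might
 * invoke this as
 *
 *	(void) dmu_objset_find(poolname, dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 *
 * where "poolname" is a placeholder for the pool's name.  Because the
 * callback always returns 0, the sweep visits every child even if some
 * destroys fail.
 */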

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif