/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>

typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (ds->ds_phys->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	ds->ds_phys->ds_deadlist_obj = ds_next->ds_phys->ds_deadlist_obj;
	ds_next->ds_phys->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
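
/*
 * For illustration (an assumed timeline, not from the original code):
 * suppose snapshots exist at txg 50 (ds_prev's previous snapshot),
 * txg 100 (ds_prev), txg 200 (ds, being destroyed) and txg 300
 * (ds_next), with no branch point.  ds_next's old-format deadlist
 * holds the blocks that died in (200, 300], and process_old_cb()
 * disposes of each one by birth txg:
 *
 *	birth <= 100:		ds_prev still references it; keep it on
 *				ds's deadlist (swapped over to ds_next
 *				above).  If birth > 50 it is now unique
 *				to ds_prev, so its ds_unique_bytes grows.
 *	100 < birth <= 200:	only ds referenced it; free it now via
 *				dsl_free_sync() and tally the space in
 *				poa.used/poa.comp/poa.uncomp.
 *
 * This is why poa.used must equal ds's ds_unique_bytes in the ASSERT
 * above: everything freed here was born and killed such that it was
 * unique to ds.
 */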

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_PUSHPAGE);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);

	for (zap_cursor_init(zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}
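
/*
 * For illustration (an assumed hierarchy, not from the original code):
 * suppose the filesystem has snapshots @a (txg 100) and @b (txg 200),
 * clone c1 was created from @b (so c1's dd_origin_txg == 200), and c2
 * was in turn cloned from a snapshot of c1.  Destroying @a reaches
 * this function with mintxg == 100: c1's origin txg 200 > 100, so the
 * key at txg 100 is removed from c1's deadlist (merging the adjacent
 * segments) and the recursion repeats for c2.  A clone branched before
 * txg 100 never had that key, and a clone of @a itself would have made
 * @a a branch point and blocked the destroy in the check above.
 */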

void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
#ifdef ZFS_DEBUG
	int err;
#endif
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	VERIFY0(dsl_dataset_hold_obj(dp,
	    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

	old_unique = ds_next->ds_phys->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	ds_next->ds_phys->ds_prev_snap_obj =
	    ds->ds_phys->ds_prev_snap_obj;
	ds_next->ds_phys->ds_prev_snap_txg =
	    ds->ds_phys->ds_prev_snap_txg;
	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
	    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds_prev->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			ds_prev->ds_phys->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    ds->ds_phys->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    ds->ds_phys->ds_creation_txg, tx);

	if (dsl_dataset_is_snapshot(ds_next)) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds_next->ds_phys->ds_next_snap_obj, FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    ds->ds_phys->ds_prev_snap_txg,
		    ds->ds_phys->ds_creation_txg,
		    &used, &comp, &uncomp);
		ds_next->ds_phys->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    ds->ds_phys->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    ds_next->ds_phys->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY0(dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	VERIFY0(dmu_object_free(mos, obj, tx));
}

static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.  (A usage sketch follows the function
 * body below.)
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_PUSHPAGE));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}
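
/*
 * A minimal usage sketch for the function above (hypothetical caller,
 * not part of this file).  On failure nothing is destroyed and each
 * offending snapshot name is mapped to its errno in errlist:
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	nvpair_t *pair;
 *
 *	fnvlist_add_boolean(snaps, "pool/fs@snap1");
 *	fnvlist_add_boolean(snaps, "pool/fs@snap2");
 *	if (dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist) != 0) {
 *		for (pair = nvlist_next_nvpair(errlist, NULL);
 *		    pair != NULL;
 *		    pair = nvlist_next_nvpair(errlist, pair)) {
 *			cmn_err(CE_NOTE, "%s: error %d",
 *			    nvpair_name(pair),
 *			    fnvpair_value_int32(pair));
 *		}
 *	}
 *	fnvlist_free(errlist);
 *	fnvlist_free(snaps);
 *
 * dsl_destroy_snapshot() below is this same pattern specialized to a
 * single name.
 */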

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl;
	nvlist_t *errlist;

	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));
	VERIFY0(nvlist_alloc(&errlist, NV_UNIQUE_NAME, KM_PUSHPAGE));

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
}

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	if (dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    ds->ds_prev->ds_phys->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dd->dd_phys->dd_head_dataset_obj);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dd->dd_phys->dd_used_bytes);
	ASSERT0(dd->dd_phys->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	VERIFY0(dmu_object_free(mos, ddobj, tx));
}

void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	zfeature_info_t *async_destroy;
	objset_t *os;

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    ds->ds_prev->ds_phys->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(ds->ds_prev->ds_phys->ds_next_snap_obj, !=, obj);
		ASSERT0(ds->ds_phys->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (ds->ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(ds->ds_prev->ds_phys->ds_num_children, >, 1);
		ds->ds_prev->ds_phys->ds_num_children--;
	}

	async_destroy =
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
			dsl_scan_t *scn = dp->dp_scan;

			spa_feature_incr(dp->dp_spa, async_destroy, tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = ds->ds_dir->dd_phys->dd_used_bytes;
		comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
		uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    ds->ds_prev->ds_dir->dd_phys->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx));

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(ds->ds_phys->ds_next_clones_obj);
	ASSERT0(ds->ds_phys->ds_props_obj);
	ASSERT0(ds->ds_phys->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	VERIFY0(dmu_object_free(mos, obj, tx));

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha, 0);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t obj;
			uint64_t prev_snap_txg =
			    dmu_objset_ds(os)->ds_phys->ds_prev_snap_txg;
			for (obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0));
}
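
/*
 * Hypothetical caller sketch (not part of this file), reacting to the
 * common failure modes enforced by dsl_destroy_head_check_impl():
 *
 *	int error = dsl_destroy_head("pool/fs");
 *	if (error == EBUSY) {
 *		// a long hold is outstanding (e.g. the objset is still
 *		// mounted or owned) or snapshots remain; release them
 *		// and retry
 *	} else if (error == EEXIST) {
 *		// the filesystem still has child filesystems
 *	}
 *
 * On pools without async_destroy the function above runs two sync
 * tasks: the first marks the dataset DS_FLAG_INCONSISTENT, the bulk of
 * the frees then happen in open context, and the second task removes
 * the dataset.  A crash in between leaves the dataset marked
 * inconsistent, which dsl_destroy_inconsistent() below can finish off.
 */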

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.  (A call-site sketch follows the function.)
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t inconsistent = DS_IS_INCONSISTENT(dmu_objset_ds(os));
		dmu_objset_rele(os, FTAG);
		if (inconsistent)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
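
/*
 * Sketch of the call-site pattern referenced in the comment above
 * (hypothetical invocation; the exact flags are an assumption):
 *
 *	(void) dmu_objset_find("pool", dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 *
 * Because the callback swallows errors and returns 0, one dataset that
 * fails to destroy does not stop the sweep over the rest.
 */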

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif