/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>

typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;

/*
 * ds must be owned.
 */
static int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (ds->ds_phys->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
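
/*
 * For example (illustrative, not exercised in this file): destroying a
 * user-held snapshot with defer == B_FALSE fails with EBUSY, while the
 * same destroy with defer == B_TRUE passes this check and is handled by
 * marking the snapshot DS_FLAG_DEFER_DESTROY in
 * dsl_destroy_snapshot_sync_impl() below.
 */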

static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));
	return (0);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

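/*
 * Old-format (pre-SPA_VERSION_DEADLISTS) deadlists cannot be merged by
 * object, so process_old_deadlist() walks ds_next's deadlist entry by
 * entry with process_old_cb() above: blocks born at or before our
 * previous snapshot stay dead (and may be credited back to prev's
 * unique bytes), while younger blocks are freed and accounted.  The
 * two datasets' deadlist objects are then swapped so ds_next ends up
 * with the combined list.
 */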
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	ds->ds_phys->ds_deadlist_obj = ds_next->ds_phys->ds_deadlist_obj;
	ds_next->ds_phys->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_PUSHPAGE);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);

	for (zap_cursor_init(zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
#ifdef ZFS_DEBUG
	int err;
#endif
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	VERIFY0(dsl_dataset_hold_obj(dp,
	    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

	old_unique = ds_next->ds_phys->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	ds_next->ds_phys->ds_prev_snap_obj =
	    ds->ds_phys->ds_prev_snap_obj;
	ds_next->ds_phys->ds_prev_snap_txg =
	    ds->ds_phys->ds_prev_snap_txg;
	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
	    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds_prev->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			ds_prev->ds_phys->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    ds->ds_phys->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    ds->ds_phys->ds_creation_txg, tx);

	if (dsl_dataset_is_snapshot(ds_next)) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds_next->ds_phys->ds_next_snap_obj, FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    ds->ds_phys->ds_prev_snap_txg,
		    ds->ds_phys->ds_creation_txg,
		    &used, &comp, &uncomp);
		ds_next->ds_phys->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    ds->ds_phys->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);
	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    ds_next->ds_phys->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		/* The next_clones list must be empty before it is freed. */
		ASSERT0(zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count));
		ASSERT0(count);
		VERIFY0(dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	VERIFY0(dmu_object_free(mos, obj, tx));
}

static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
	    NV_UNIQUE_NAME, KM_PUSHPAGE));
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}
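
/*
 * Example (a sketch, not called from this file; the snapshot names are
 * hypothetical): destroying several snapshots at once and reporting the
 * per-snapshot errors left in the errlist on failure.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errors = fnvlist_alloc();
 *	nvpair_t *pair;
 *	int error;
 *
 *	fnvlist_add_boolean(snaps, "tank/fs@snap1");
 *	fnvlist_add_boolean(snaps, "tank/fs@snap2");
 *	error = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errors);
 *	if (error != 0) {
 *		for (pair = nvlist_next_nvpair(errors, NULL); pair != NULL;
 *		    pair = nvlist_next_nvpair(errors, pair)) {
 *			cmn_err(CE_NOTE, "could not destroy %s: %d",
 *			    nvpair_name(pair), fnvpair_value_int32(pair));
 *		}
 *	}
 *	fnvlist_free(errors);
 *	fnvlist_free(snaps);
 *
 * dsl_destroy_snapshot() below is the single-snapshot wrapper around
 * the same path.
 */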

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl;
	nvlist_t *errlist;

	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));
	VERIFY0(nvlist_alloc(&errlist, NV_UNIQUE_NAME, KM_PUSHPAGE));

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone).
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
}
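
/*
 * This synchronous path is the fallback for pools without
 * SPA_FEATURE_ASYNC_DESTROY.  Every block of the head dataset must be
 * freed in the destroying txg, which is why dsl_destroy_head() first
 * frees the objects from open context on such pools, keeping the final
 * sync task short.
 */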

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	if (dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    ds->ds_prev->ds_phys->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dd->dd_phys->dd_head_dataset_obj);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dd->dd_phys->dd_used_bytes);
	ASSERT0(dd->dd_phys->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	VERIFY0(dmu_object_free(mos, ddobj, tx));
}

void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;
	zfeature_info_t *async_destroy;
	objset_t *os;

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    ds->ds_prev->ds_phys->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(ds->ds_prev->ds_phys->ds_next_snap_obj, !=, obj);
		ASSERT0(ds->ds_phys->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (ds->ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(ds->ds_prev->ds_phys->ds_num_children, >, 1);
		ds->ds_prev->ds_phys->ds_num_children--;
	}

	async_destroy =
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_deadlist_obj = 0;

	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
			spa_feature_incr(dp->dp_spa, async_destroy, tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
		}

		used = ds->ds_dir->dd_phys->dd_used_bytes;
		comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
		uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    ds->ds_prev->ds_dir->dd_phys->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx));

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(ds->ds_phys->ds_next_clones_obj);
	ASSERT0(ds->ds_phys->ds_props_obj);
	ASSERT0(ds->ds_phys->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	VERIFY0(dmu_object_free(mos, obj, tx));

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha, 0);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t obj;
			uint64_t prev_snap_txg =
			    dmu_objset_ds(os)->ds_phys->ds_prev_snap_txg;
			for (obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0));
}
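
/*
 * Example (a sketch; the dataset name is hypothetical): a kernel caller
 * destroying a filesystem and mapping the common errnos from
 * dsl_destroy_head_check_impl() back to their causes.
 *
 *	int error = dsl_destroy_head("tank/scratch");
 *
 *	error == EINVAL: the name refers to a snapshot, not a head
 *	error == EBUSY:  the dataset is long-held or has snapshots
 *	error == EEXIST: the dataset has children
 */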

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t inconsistent = DS_IS_INCONSISTENT(dmu_objset_ds(os));
		dmu_objset_rele(os, FTAG);
		if (inconsistent)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
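
/*
 * Example (a sketch of the typical wiring; not invoked from this file):
 * sweeping a pool for datasets left DS_FLAG_INCONSISTENT by an
 * interrupted receive or destroy, e.g. after import.
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */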

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif