1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/dmu_objset.h>
26 #include <sys/dsl_dataset.h>
27 #include <sys/dsl_dir.h>
28 #include <sys/dsl_prop.h>
29 #include <sys/dsl_synctask.h>
30 #include <sys/dmu_traverse.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/arc.h>
33 #include <sys/zio.h>
34 #include <sys/zap.h>
35 #include <sys/unique.h>
36 #include <sys/zfs_context.h>
37 #include <sys/zfs_ioctl.h>
38 #include <sys/spa.h>
39 #include <sys/zfs_znode.h>
40 #include <sys/zfs_onexit.h>
41 #include <sys/zvol.h>
42 #include <sys/dsl_scan.h>
43 #include <sys/dsl_deadlist.h>
44
45 static char *dsl_reaper = "the grim reaper";
46
47 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
48 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
49 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
50
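/*
 * SWITCH64() swaps two uint64_t lvalues in place.  It is used below by
 * process_old_deadlist() to exchange the deadlist objects of a snapshot
 * and its next snapshot without copying the lists themselves.
 */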
51 #define SWITCH64(x, y) \
52 { \
53 uint64_t __tmp = (x); \
54 (x) = (y); \
55 (y) = __tmp; \
56 }
57
58 #define DS_REF_MAX (1ULL << 62)
59
60 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
61
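/*
 * A dataset is marked destroyed by pointing its ds_owner at the
 * dsl_reaper sentinel (see dsl_dataset_destroy_sync()).  Waiters that
 * see this give up with ENOENT instead of taking new holds.
 */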
62 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
63
64
65 /*
66  * Figure out how much of this delta should be propagated to the dsl_dir
67 * layer. If there's a refreservation, that space has already been
68 * partially accounted for in our ancestors.
69 */
70 static int64_t
71 parent_delta(dsl_dataset_t *ds, int64_t delta)
72 {
73 uint64_t old_bytes, new_bytes;
74
75 if (ds->ds_reserved == 0)
76 return (delta);
77
78 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
79 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
80
81 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
82 return (new_bytes - old_bytes);
83 }
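/*
 * Example: with ds_reserved = 10M and ds_unique_bytes = 8M, a +1M delta
 * yields old = MAX(8M, 10M) = 10M and new = MAX(9M, 10M) = 10M, so no
 * change is propagated -- the refreservation already charged that space
 * to our ancestors.  Once unique_bytes exceeds the reservation, deltas
 * pass through unchanged.
 */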
84
85 void
86 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
87 {
88 int used, compressed, uncompressed;
89 int64_t delta;
90
91 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
92 compressed = BP_GET_PSIZE(bp);
93 uncompressed = BP_GET_UCSIZE(bp);
94
95 dprintf_bp(bp, "ds=%p", ds);
96
97 ASSERT(dmu_tx_is_syncing(tx));
98 /* It could have been compressed away to nothing */
99 if (BP_IS_HOLE(bp))
100 return;
101 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
102 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
103 if (ds == NULL) {
104 /*
105 * Account for the meta-objset space in its placeholder
106 * dsl_dir.
107 */
108 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
109 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
110 used, compressed, uncompressed, tx);
111 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
112 return;
113 }
114 dmu_buf_will_dirty(ds->ds_dbuf, tx);
115
116 mutex_enter(&ds->ds_dir->dd_lock);
117 mutex_enter(&ds->ds_lock);
118 delta = parent_delta(ds, used);
119 ds->ds_phys->ds_used_bytes += used;
120 ds->ds_phys->ds_compressed_bytes += compressed;
121 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
122 ds->ds_phys->ds_unique_bytes += used;
123 mutex_exit(&ds->ds_lock);
124 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
125 compressed, uncompressed, tx);
126 dsl_dir_transfer_space(ds->ds_dir, used - delta,
127 DD_USED_REFRSRV, DD_USED_HEAD, tx);
128 mutex_exit(&ds->ds_dir->dd_lock);
129 }
130
131 int
132 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
133 boolean_t async)
134 {
135 int used, compressed, uncompressed;
136
137 if (BP_IS_HOLE(bp))
138 return (0);
139
140 ASSERT(dmu_tx_is_syncing(tx));
141 ASSERT(bp->blk_birth <= tx->tx_txg);
142
143 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
144 compressed = BP_GET_PSIZE(bp);
145 uncompressed = BP_GET_UCSIZE(bp);
146
147 ASSERT(used > 0);
148 if (ds == NULL) {
149 /*
150 * Account for the meta-objset space in its placeholder
151 * dataset.
152 */
153 dsl_free(tx->tx_pool, tx->tx_txg, bp);
154
155 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
156 -used, -compressed, -uncompressed, tx);
157 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
158 return (used);
159 }
160 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
161
162 ASSERT(!dsl_dataset_is_snapshot(ds));
163 dmu_buf_will_dirty(ds->ds_dbuf, tx);
164
165 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
166 int64_t delta;
167
168 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
169 dsl_free(tx->tx_pool, tx->tx_txg, bp);
170
171 mutex_enter(&ds->ds_dir->dd_lock);
172 mutex_enter(&ds->ds_lock);
173 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
174 !DS_UNIQUE_IS_ACCURATE(ds));
175 delta = parent_delta(ds, -used);
176 ds->ds_phys->ds_unique_bytes -= used;
177 mutex_exit(&ds->ds_lock);
178 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
179 delta, -compressed, -uncompressed, tx);
180 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
181 DD_USED_REFRSRV, DD_USED_HEAD, tx);
182 mutex_exit(&ds->ds_dir->dd_lock);
183 } else {
184 dprintf_bp(bp, "putting on dead list: %s", "");
185 if (async) {
186 /*
187 * We are here as part of zio's write done callback,
188 * which means we're a zio interrupt thread. We can't
189 * call dsl_deadlist_insert() now because it may block
190 * waiting for I/O. Instead, put bp on the deferred
191 * queue and let dsl_pool_sync() finish the job.
192 */
193 bplist_append(&ds->ds_pending_deadlist, bp);
194 } else {
195 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
196 }
197 ASSERT3U(ds->ds_prev->ds_object, ==,
198 ds->ds_phys->ds_prev_snap_obj);
199 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
200 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
201 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
202 ds->ds_object && bp->blk_birth >
203 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
204 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
205 mutex_enter(&ds->ds_prev->ds_lock);
206 ds->ds_prev->ds_phys->ds_unique_bytes += used;
207 mutex_exit(&ds->ds_prev->ds_lock);
208 }
209 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
210 dsl_dir_transfer_space(ds->ds_dir, used,
211 DD_USED_HEAD, DD_USED_SNAP, tx);
212 }
213 }
214 mutex_enter(&ds->ds_lock);
215 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
216 ds->ds_phys->ds_used_bytes -= used;
217 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
218 ds->ds_phys->ds_compressed_bytes -= compressed;
219 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
220 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
221 mutex_exit(&ds->ds_lock);
222
223 return (used);
224 }
225
226 uint64_t
227 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
228 {
229 uint64_t trysnap = 0;
230
231 if (ds == NULL)
232 return (0);
233 /*
234 * The snapshot creation could fail, but that would cause an
235 * incorrect FALSE return, which would only result in an
236 * overestimation of the amount of space that an operation would
237 * consume, which is OK.
238 *
239 * There's also a small window where we could miss a pending
240 * snapshot, because we could set the sync task in the quiescing
241 * phase. So this should only be used as a guess.
242 */
243 if (ds->ds_trysnap_txg >
244 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
245 trysnap = ds->ds_trysnap_txg;
246 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
247 }
248
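/*
 * A block born at or before the most recent snapshot is still referenced
 * by that snapshot, so overwriting or freeing it cannot release any space.
 * When the block is freeable, we also prefetch its dedup-table entry here
 * so that the eventual dsl_dataset_block_kill() is less likely to block
 * on DDT I/O.
 */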
249 boolean_t
250 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
251 uint64_t blk_birth)
252 {
253 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
254 return (B_FALSE);
255
256 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
257
258 return (B_TRUE);
259 }
260
261 /* ARGSUSED */
262 static void
263 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
264 {
265 dsl_dataset_t *ds = dsv;
266
267 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
268
269 unique_remove(ds->ds_fsid_guid);
270
271 if (ds->ds_objset != NULL)
272 dmu_objset_evict(ds->ds_objset);
273
274 if (ds->ds_prev) {
275 dsl_dataset_drop_ref(ds->ds_prev, ds);
276 ds->ds_prev = NULL;
277 }
278
279 bplist_destroy(&ds->ds_pending_deadlist);
280 if (db != NULL) {
281 dsl_deadlist_close(&ds->ds_deadlist);
282 } else {
283 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
284 ASSERT(!ds->ds_deadlist.dl_oldfmt);
285 }
286 if (ds->ds_dir)
287 dsl_dir_close(ds->ds_dir, ds);
288
289 ASSERT(!list_link_active(&ds->ds_synced_link));
290
291 mutex_destroy(&ds->ds_lock);
292 mutex_destroy(&ds->ds_recvlock);
293 mutex_destroy(&ds->ds_opening_lock);
294 rw_destroy(&ds->ds_rwlock);
295 cv_destroy(&ds->ds_exclusive_cv);
296
297 kmem_free(ds, sizeof (dsl_dataset_t));
298 }
299
300 static int
301 dsl_dataset_get_snapname(dsl_dataset_t *ds)
302 {
303 dsl_dataset_phys_t *headphys;
304 int err;
305 dmu_buf_t *headdbuf;
306 dsl_pool_t *dp = ds->ds_dir->dd_pool;
307 objset_t *mos = dp->dp_meta_objset;
308
309 if (ds->ds_snapname[0])
310 return (0);
311 if (ds->ds_phys->ds_next_snap_obj == 0)
312 return (0);
313
314 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
315 FTAG, &headdbuf);
316 if (err)
317 return (err);
318 headphys = headdbuf->db_data;
319 err = zap_value_search(dp->dp_meta_objset,
320 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
321 dmu_buf_rele(headdbuf, FTAG);
322 return (err);
323 }
324
325 static int
326 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
327 {
328 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
329 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
330 matchtype_t mt;
331 int err;
332
333 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
334 mt = MT_FIRST;
335 else
336 mt = MT_EXACT;
337
338 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
339 value, mt, NULL, 0, NULL);
340 if (err == ENOTSUP && mt == MT_FIRST)
341 err = zap_lookup(mos, snapobj, name, 8, 1, value);
342 return (err);
343 }
344
345 static int
346 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
347 {
348 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
349 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
350 matchtype_t mt;
351 int err;
352
353 dsl_dir_snap_cmtime_update(ds->ds_dir);
354
355 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
356 mt = MT_FIRST;
357 else
358 mt = MT_EXACT;
359
360 err = zap_remove_norm(mos, snapobj, name, mt, tx);
361 if (err == ENOTSUP && mt == MT_FIRST)
362 err = zap_remove(mos, snapobj, name, tx);
363 return (err);
364 }
365
366 static int
367 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
368 dsl_dataset_t **dsp)
369 {
370 objset_t *mos = dp->dp_meta_objset;
371 dmu_buf_t *dbuf;
372 dsl_dataset_t *ds;
373 int err;
374 dmu_object_info_t doi;
375
376 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
377 dsl_pool_sync_context(dp));
378
379 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
380 if (err)
381 return (err);
382
383 /* Make sure dsobj has the correct object type. */
384 dmu_object_info_from_db(dbuf, &doi);
385 	if (doi.doi_type != DMU_OT_DSL_DATASET) {
		dmu_buf_rele(dbuf, tag);	/* don't leak the bonus hold */
386 		return (EINVAL);
	}
387
388 ds = dmu_buf_get_user(dbuf);
389 if (ds == NULL) {
390 dsl_dataset_t *winner = NULL;
391
392 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
393 ds->ds_dbuf = dbuf;
394 ds->ds_object = dsobj;
395 ds->ds_phys = dbuf->db_data;
396 list_link_init(&ds->ds_synced_link);
397
398 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
399 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
400 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
401 rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
402 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
403
404 bplist_create(&ds->ds_pending_deadlist);
405 dsl_deadlist_open(&ds->ds_deadlist,
406 mos, ds->ds_phys->ds_deadlist_obj);
407
408 if (err == 0) {
409 err = dsl_dir_open_obj(dp,
410 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
411 }
412 if (err) {
413 mutex_destroy(&ds->ds_lock);
414 mutex_destroy(&ds->ds_recvlock);
415 mutex_destroy(&ds->ds_opening_lock);
416 rw_destroy(&ds->ds_rwlock);
417 cv_destroy(&ds->ds_exclusive_cv);
418 bplist_destroy(&ds->ds_pending_deadlist);
419 dsl_deadlist_close(&ds->ds_deadlist);
420 kmem_free(ds, sizeof (dsl_dataset_t));
421 dmu_buf_rele(dbuf, tag);
422 return (err);
423 }
424
425 if (!dsl_dataset_is_snapshot(ds)) {
426 ds->ds_snapname[0] = '\0';
427 if (ds->ds_phys->ds_prev_snap_obj) {
428 err = dsl_dataset_get_ref(dp,
429 ds->ds_phys->ds_prev_snap_obj,
430 ds, &ds->ds_prev);
431 }
432 } else {
433 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
434 err = dsl_dataset_get_snapname(ds);
435 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
436 err = zap_count(
437 ds->ds_dir->dd_pool->dp_meta_objset,
438 ds->ds_phys->ds_userrefs_obj,
439 &ds->ds_userrefs);
440 }
441 }
442
443 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
444 /*
445 * In sync context, we're called with either no lock
446 * or with the write lock. If we're not syncing,
447 * we're always called with the read lock held.
448 */
449 boolean_t need_lock =
450 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
451 dsl_pool_sync_context(dp);
452
453 if (need_lock)
454 rw_enter(&dp->dp_config_rwlock, RW_READER);
455
456 err = dsl_prop_get_ds(ds,
457 "refreservation", sizeof (uint64_t), 1,
458 &ds->ds_reserved, NULL);
459 if (err == 0) {
460 err = dsl_prop_get_ds(ds,
461 "refquota", sizeof (uint64_t), 1,
462 &ds->ds_quota, NULL);
463 }
464
465 if (need_lock)
466 rw_exit(&dp->dp_config_rwlock);
467 } else {
468 ds->ds_reserved = ds->ds_quota = 0;
469 }
470
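		/*
		 * Publish the new dsl_dataset_t as the dbuf's user data.
		 * If another thread instantiated this dataset concurrently,
		 * dmu_buf_set_user_ie() returns that existing user (the
		 * "winner"); we then tear down our copy and use theirs.
		 */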
471 if (err == 0) {
472 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
473 dsl_dataset_evict);
474 }
475 if (err || winner) {
476 bplist_destroy(&ds->ds_pending_deadlist);
477 dsl_deadlist_close(&ds->ds_deadlist);
478 if (ds->ds_prev)
479 dsl_dataset_drop_ref(ds->ds_prev, ds);
480 dsl_dir_close(ds->ds_dir, ds);
481 mutex_destroy(&ds->ds_lock);
482 mutex_destroy(&ds->ds_recvlock);
483 mutex_destroy(&ds->ds_opening_lock);
484 rw_destroy(&ds->ds_rwlock);
485 cv_destroy(&ds->ds_exclusive_cv);
486 kmem_free(ds, sizeof (dsl_dataset_t));
487 if (err) {
488 dmu_buf_rele(dbuf, tag);
489 return (err);
490 }
491 ds = winner;
492 } else {
493 ds->ds_fsid_guid =
494 unique_insert(ds->ds_phys->ds_fsid_guid);
495 }
496 }
497 ASSERT3P(ds->ds_dbuf, ==, dbuf);
498 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
499 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
500 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
501 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
502 mutex_enter(&ds->ds_lock);
503 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
504 mutex_exit(&ds->ds_lock);
505 dmu_buf_rele(ds->ds_dbuf, tag);
506 return (ENOENT);
507 }
508 mutex_exit(&ds->ds_lock);
509 *dsp = ds;
510 return (0);
511 }
512
513 static int
514 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
515 {
516 dsl_pool_t *dp = ds->ds_dir->dd_pool;
517
518 /*
519 	 * In syncing context we don't want to take the rwlock: there
520 * may be an existing writer waiting for sync phase to
521 * finish. We don't need to worry about such writers, since
522 * sync phase is single-threaded, so the writer can't be
523 * doing anything while we are active.
524 */
525 if (dsl_pool_sync_context(dp)) {
526 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
527 return (0);
528 }
529
530 /*
531 * Normal users will hold the ds_rwlock as a READER until they
532 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
533 * drop their READER lock after they set the ds_owner field.
534 *
535 * If the dataset is being destroyed, the destroy thread will
536 * obtain a WRITER lock for exclusive access after it's done its
537 * open-context work and then change the ds_owner to
538 * dsl_reaper once destruction is assured. So threads
539 * may block here temporarily, until the "destructability" of
540 * the dataset is determined.
541 */
542 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
543 mutex_enter(&ds->ds_lock);
544 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
545 rw_exit(&dp->dp_config_rwlock);
546 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
547 if (DSL_DATASET_IS_DESTROYED(ds)) {
548 mutex_exit(&ds->ds_lock);
549 dsl_dataset_drop_ref(ds, tag);
550 rw_enter(&dp->dp_config_rwlock, RW_READER);
551 return (ENOENT);
552 }
553 /*
554 * The dp_config_rwlock lives above the ds_lock. And
555 * we need to check DSL_DATASET_IS_DESTROYED() while
556 * holding the ds_lock, so we have to drop and reacquire
557 * the ds_lock here.
558 */
559 mutex_exit(&ds->ds_lock);
560 rw_enter(&dp->dp_config_rwlock, RW_READER);
561 mutex_enter(&ds->ds_lock);
562 }
563 mutex_exit(&ds->ds_lock);
564 return (0);
565 }
566
567 int
568 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
569 dsl_dataset_t **dsp)
570 {
571 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
572
573 if (err)
574 return (err);
575 return (dsl_dataset_hold_ref(*dsp, tag));
576 }
577
578 int
579 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
580 void *tag, dsl_dataset_t **dsp)
581 {
582 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
583 if (err)
584 return (err);
585 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
586 dsl_dataset_rele(*dsp, tag);
587 *dsp = NULL;
588 return (EBUSY);
589 }
590 return (0);
591 }
592
593 int
594 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
595 {
596 dsl_dir_t *dd;
597 dsl_pool_t *dp;
598 const char *snapname;
599 uint64_t obj;
600 int err = 0;
601
602 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
603 if (err)
604 return (err);
605
606 dp = dd->dd_pool;
607 obj = dd->dd_phys->dd_head_dataset_obj;
608 rw_enter(&dp->dp_config_rwlock, RW_READER);
609 if (obj)
610 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
611 else
612 err = ENOENT;
613 if (err)
614 goto out;
615
616 err = dsl_dataset_hold_ref(*dsp, tag);
617
618 /* we may be looking for a snapshot */
619 if (err == 0 && snapname != NULL) {
620 dsl_dataset_t *ds = NULL;
621
622 if (*snapname++ != '@') {
623 dsl_dataset_rele(*dsp, tag);
624 err = ENOENT;
625 goto out;
626 }
627
628 dprintf("looking for snapshot '%s'\n", snapname);
629 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
630 if (err == 0)
631 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
632 dsl_dataset_rele(*dsp, tag);
633
634 ASSERT3U((err == 0), ==, (ds != NULL));
635
636 if (ds) {
637 mutex_enter(&ds->ds_lock);
638 if (ds->ds_snapname[0] == 0)
639 (void) strlcpy(ds->ds_snapname, snapname,
640 sizeof (ds->ds_snapname));
641 mutex_exit(&ds->ds_lock);
642 err = dsl_dataset_hold_ref(ds, tag);
643 *dsp = err ? NULL : ds;
644 }
645 }
646 out:
647 rw_exit(&dp->dp_config_rwlock);
648 dsl_dir_close(dd, FTAG);
649 return (err);
650 }
651
652 int
653 dsl_dataset_own(const char *name, boolean_t inconsistentok,
654 void *tag, dsl_dataset_t **dsp)
655 {
656 int err = dsl_dataset_hold(name, tag, dsp);
657 if (err)
658 return (err);
659 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
660 dsl_dataset_rele(*dsp, tag);
661 return (EBUSY);
662 }
663 return (0);
664 }
665
666 void
667 dsl_dataset_name(dsl_dataset_t *ds, char *name)
668 {
669 if (ds == NULL) {
670 (void) strcpy(name, "mos");
671 } else {
672 dsl_dir_name(ds->ds_dir, name);
673 VERIFY(0 == dsl_dataset_get_snapname(ds));
674 if (ds->ds_snapname[0]) {
675 (void) strcat(name, "@");
676 /*
677 * We use a "recursive" mutex so that we
678 * can call dprintf_ds() with ds_lock held.
679 */
680 if (!MUTEX_HELD(&ds->ds_lock)) {
681 mutex_enter(&ds->ds_lock);
682 (void) strcat(name, ds->ds_snapname);
683 mutex_exit(&ds->ds_lock);
684 } else {
685 (void) strcat(name, ds->ds_snapname);
686 }
687 }
688 }
689 }
690
691 static int
692 dsl_dataset_namelen(dsl_dataset_t *ds)
693 {
694 int result;
695
696 if (ds == NULL) {
697 result = 3; /* "mos" */
698 } else {
699 result = dsl_dir_namelen(ds->ds_dir);
700 VERIFY(0 == dsl_dataset_get_snapname(ds));
701 if (ds->ds_snapname[0]) {
702 ++result; /* adding one for the @-sign */
703 if (!MUTEX_HELD(&ds->ds_lock)) {
704 mutex_enter(&ds->ds_lock);
705 result += strlen(ds->ds_snapname);
706 mutex_exit(&ds->ds_lock);
707 } else {
708 result += strlen(ds->ds_snapname);
709 }
710 }
711 }
712
713 return (result);
714 }
715
716 void
717 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
718 {
719 dmu_buf_rele(ds->ds_dbuf, tag);
720 }
721
722 void
723 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
724 {
725 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
726 rw_exit(&ds->ds_rwlock);
727 }
728 dsl_dataset_drop_ref(ds, tag);
729 }
730
731 void
732 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
733 {
734 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
735 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
736
737 mutex_enter(&ds->ds_lock);
738 ds->ds_owner = NULL;
739 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
740 rw_exit(&ds->ds_rwlock);
741 cv_broadcast(&ds->ds_exclusive_cv);
742 }
743 mutex_exit(&ds->ds_lock);
744 if (ds->ds_dbuf)
745 dsl_dataset_drop_ref(ds, tag);
746 else
747 dsl_dataset_evict(NULL, ds);
748 }
749
750 boolean_t
751 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
752 {
753 boolean_t gotit = FALSE;
754
755 mutex_enter(&ds->ds_lock);
756 if (ds->ds_owner == NULL &&
757 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
758 ds->ds_owner = tag;
759 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
760 rw_exit(&ds->ds_rwlock);
761 gotit = TRUE;
762 }
763 mutex_exit(&ds->ds_lock);
764 return (gotit);
765 }
766
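/*
 * Promote an owner's hold to exclusive access by taking ds_rwlock as
 * WRITER; new readers in dsl_dataset_hold_ref() will block until the
 * lock is dropped (or the dataset is marked destroyed).
 */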
767 void
768 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
769 {
770 ASSERT3P(owner, ==, ds->ds_owner);
771 if (!RW_WRITE_HELD(&ds->ds_rwlock))
772 rw_enter(&ds->ds_rwlock, RW_WRITER);
773 }
774
775 uint64_t
776 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
777 uint64_t flags, dmu_tx_t *tx)
778 {
779 dsl_pool_t *dp = dd->dd_pool;
780 dmu_buf_t *dbuf;
781 dsl_dataset_phys_t *dsphys;
782 uint64_t dsobj;
783 objset_t *mos = dp->dp_meta_objset;
784
785 if (origin == NULL)
786 origin = dp->dp_origin_snap;
787
788 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
789 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
790 ASSERT(dmu_tx_is_syncing(tx));
791 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
792
793 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
794 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
795 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
796 dmu_buf_will_dirty(dbuf, tx);
797 dsphys = dbuf->db_data;
798 bzero(dsphys, sizeof (dsl_dataset_phys_t));
799 dsphys->ds_dir_obj = dd->dd_object;
800 dsphys->ds_flags = flags;
801 dsphys->ds_fsid_guid = unique_create();
802 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
803 sizeof (dsphys->ds_guid));
804 dsphys->ds_snapnames_zapobj =
805 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
806 DMU_OT_NONE, 0, tx);
807 dsphys->ds_creation_time = gethrestime_sec();
808 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
809
810 if (origin == NULL) {
811 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
812 } else {
813 dsl_dataset_t *ohds;
814
815 dsphys->ds_prev_snap_obj = origin->ds_object;
816 dsphys->ds_prev_snap_txg =
817 origin->ds_phys->ds_creation_txg;
818 dsphys->ds_used_bytes =
819 origin->ds_phys->ds_used_bytes;
820 dsphys->ds_compressed_bytes =
821 origin->ds_phys->ds_compressed_bytes;
822 dsphys->ds_uncompressed_bytes =
823 origin->ds_phys->ds_uncompressed_bytes;
824 dsphys->ds_bp = origin->ds_phys->ds_bp;
825 dsphys->ds_flags |= origin->ds_phys->ds_flags;
826
827 dmu_buf_will_dirty(origin->ds_dbuf, tx);
828 origin->ds_phys->ds_num_children++;
829
830 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
831 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
832 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
833 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
834 dsl_dataset_rele(ohds, FTAG);
835
836 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
837 if (origin->ds_phys->ds_next_clones_obj == 0) {
838 origin->ds_phys->ds_next_clones_obj =
839 zap_create(mos,
840 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
841 }
842 VERIFY(0 == zap_add_int(mos,
843 origin->ds_phys->ds_next_clones_obj,
844 dsobj, tx));
845 }
846
847 dmu_buf_will_dirty(dd->dd_dbuf, tx);
848 dd->dd_phys->dd_origin_obj = origin->ds_object;
849 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
850 if (origin->ds_dir->dd_phys->dd_clones == 0) {
851 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
852 origin->ds_dir->dd_phys->dd_clones =
853 zap_create(mos,
854 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
855 }
856 VERIFY3U(0, ==, zap_add_int(mos,
857 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
858 }
859 }
860
861 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
862 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
863
864 dmu_buf_rele(dbuf, FTAG);
865
866 dmu_buf_will_dirty(dd->dd_dbuf, tx);
867 dd->dd_phys->dd_head_dataset_obj = dsobj;
868
869 return (dsobj);
870 }
871
872 uint64_t
873 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
874 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
875 {
876 dsl_pool_t *dp = pdd->dd_pool;
877 uint64_t dsobj, ddobj;
878 dsl_dir_t *dd;
879
880 ASSERT(lastname[0] != '@');
881
882 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
883 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
884
885 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
886
887 dsl_deleg_set_create_perms(dd, tx, cr);
888
889 dsl_dir_close(dd, FTAG);
890
891 /*
892 * If we are creating a clone, make sure we zero out any stale
893 	 * data from the origin snapshot's zil header.
894 */
895 if (origin != NULL) {
896 dsl_dataset_t *ds;
897 objset_t *os;
898
899 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
900 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
901 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
902 dsl_dataset_dirty(ds, tx);
903 dsl_dataset_rele(ds, FTAG);
904 }
905
906 return (dsobj);
907 }
908
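/*
 * State shared by dsl_snapshots_destroy() and its per-filesystem callback:
 * dstg collects one destroy sync task per snapshot so the whole recursive
 * destroy commits in a single txg; failed points at the caller's fsname
 * buffer, which is overwritten with the name of the dataset that caused
 * an error.
 */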
909 struct destroyarg {
910 dsl_sync_task_group_t *dstg;
911 char *snapname;
912 char *failed;
913 boolean_t defer;
914 };
915
916 static int
917 dsl_snapshot_destroy_one(const char *name, void *arg)
918 {
919 struct destroyarg *da = arg;
920 dsl_dataset_t *ds;
921 int err;
922 char *dsname;
923
924 dsname = kmem_asprintf("%s@%s", name, da->snapname);
925 err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
926 strfree(dsname);
927 if (err == 0) {
928 struct dsl_ds_destroyarg *dsda;
929
930 dsl_dataset_make_exclusive(ds, da->dstg);
931 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
932 dsda->ds = ds;
933 dsda->defer = da->defer;
934 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
935 dsl_dataset_destroy_sync, dsda, da->dstg, 0);
936 } else if (err == ENOENT) {
937 err = 0;
938 } else {
939 (void) strcpy(da->failed, name);
940 }
941 return (err);
942 }
943
944 /*
945 * Destroy 'snapname' in all descendants of 'fsname'.
946 */
947 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
948 int
949 dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
950 {
951 int err;
952 struct destroyarg da;
953 dsl_sync_task_t *dst;
954 spa_t *spa;
955
956 err = spa_open(fsname, &spa, FTAG);
957 if (err)
958 return (err);
959 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
960 da.snapname = snapname;
961 da.failed = fsname;
962 da.defer = defer;
963
964 err = dmu_objset_find(fsname,
965 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
966
967 if (err == 0)
968 err = dsl_sync_task_group_wait(da.dstg);
969
970 for (dst = list_head(&da.dstg->dstg_tasks); dst;
971 dst = list_next(&da.dstg->dstg_tasks, dst)) {
972 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
973 dsl_dataset_t *ds = dsda->ds;
974
975 /*
976 * Return the file system name that triggered the error
977 */
978 if (dst->dst_err) {
979 dsl_dataset_name(ds, fsname);
980 *strchr(fsname, '@') = '\0';
981 }
982 ASSERT3P(dsda->rm_origin, ==, NULL);
983 dsl_dataset_disown(ds, da.dstg);
984 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
985 }
986
987 dsl_sync_task_group_destroy(da.dstg);
988 spa_close(spa, FTAG);
989 return (err);
990 }
991
992 static boolean_t
993 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
994 {
995 boolean_t might_destroy = B_FALSE;
996
997 mutex_enter(&ds->ds_lock);
998 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
999 DS_IS_DEFER_DESTROY(ds))
1000 might_destroy = B_TRUE;
1001 mutex_exit(&ds->ds_lock);
1002
1003 return (might_destroy);
1004 }
1005
1006 /*
1007 * If we're removing a clone, and these three conditions are true:
1008 * 1) the clone's origin has no other children
1009 * 2) the clone's origin has no user references
1010 * 3) the clone's origin has been marked for deferred destruction
1011 * Then, prepare to remove the origin as part of this sync task group.
1012 */
1013 static int
1014 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1015 {
1016 dsl_dataset_t *ds = dsda->ds;
1017 dsl_dataset_t *origin = ds->ds_prev;
1018
1019 if (dsl_dataset_might_destroy_origin(origin)) {
1020 char *name;
1021 int namelen;
1022 int error;
1023
1024 namelen = dsl_dataset_namelen(origin) + 1;
1025 name = kmem_alloc(namelen, KM_SLEEP);
1026 dsl_dataset_name(origin, name);
1027 #ifdef _KERNEL
1028 error = zfs_unmount_snap(name, NULL);
1029 if (error) {
1030 kmem_free(name, namelen);
1031 return (error);
1032 }
1033 #endif
1034 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1035 kmem_free(name, namelen);
1036 if (error)
1037 return (error);
1038 dsda->rm_origin = origin;
1039 dsl_dataset_make_exclusive(origin, tag);
1040 }
1041
1042 return (0);
1043 }
1044
1045 /*
1046 * ds must be opened as OWNER. On return (whether successful or not),
1047 * ds will be closed and caller can no longer dereference it.
1048 */
1049 int
1050 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1051 {
1052 int err;
1053 dsl_sync_task_group_t *dstg;
1054 objset_t *os;
1055 dsl_dir_t *dd;
1056 uint64_t obj;
1057 struct dsl_ds_destroyarg dsda = { 0 };
1058 dsl_dataset_t *dummy_ds;
1059
1060 dsda.ds = ds;
1061
1062 if (dsl_dataset_is_snapshot(ds)) {
1063 /* Destroying a snapshot is simpler */
1064 dsl_dataset_make_exclusive(ds, tag);
1065
1066 dsda.defer = defer;
1067 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1068 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1069 &dsda, tag, 0);
1070 ASSERT3P(dsda.rm_origin, ==, NULL);
1071 goto out;
1072 } else if (defer) {
1073 err = EINVAL;
1074 goto out;
1075 }
1076
1077 dd = ds->ds_dir;
1078 dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
1079 dummy_ds->ds_dir = dd;
1080 dummy_ds->ds_object = ds->ds_object;
1081
1082 /*
1083 * Check for errors and mark this ds as inconsistent, in
1084 * case we crash while freeing the objects.
1085 */
1086 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1087 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1088 if (err)
1089 goto out_free;
1090
1091 err = dmu_objset_from_ds(ds, &os);
1092 if (err)
1093 goto out_free;
1094
1095 /*
1096 * remove the objects in open context, so that we won't
1097 * have too much to do in syncing context.
1098 */
1099 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1100 ds->ds_phys->ds_prev_snap_txg)) {
1101 /*
1102 * Ignore errors, if there is not enough disk space
1103 * we will deal with it in dsl_dataset_destroy_sync().
1104 */
1105 (void) dmu_free_object(os, obj);
1106 }
1107 if (err != ESRCH)
1108 goto out_free;
1109
1110 /*
1111 * Only the ZIL knows how to free log blocks.
1112 */
1113 zil_destroy(dmu_objset_zil(os), B_FALSE);
1114
1115 /*
1116 * Sync out all in-flight IO.
1117 */
1118 txg_wait_synced(dd->dd_pool, 0);
1119
1120 /*
1121 * If we managed to free all the objects in open
1122 * context, the user space accounting should be zero.
1123 */
1124 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1125 dmu_objset_userused_enabled(os)) {
1126 ASSERTV(uint64_t count);
1127 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1128 count == 0);
1129 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1130 count == 0);
1131 }
1132
1133 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1134 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1135 rw_exit(&dd->dd_pool->dp_config_rwlock);
1136
1137 if (err)
1138 goto out_free;
1139
1140 /*
1141 * Blow away the dsl_dir + head dataset.
1142 */
1143 dsl_dataset_make_exclusive(ds, tag);
1144 /*
1145 * If we're removing a clone, we might also need to remove its
1146 * origin.
1147 */
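	/*
	 * The origin must be owned in open context before the sync tasks
	 * run, but only the sync-context check knows for certain whether
	 * the origin will go away.  If the check discovers that we needed
	 * to prepare the origin but had not (need_prep), it fails with
	 * EBUSY and we retry after doing the preparation.
	 */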
1148 do {
1149 dsda.need_prep = B_FALSE;
1150 if (dsl_dir_is_clone(dd)) {
1151 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1152 if (err) {
1153 dsl_dir_close(dd, FTAG);
1154 goto out_free;
1155 }
1156 }
1157
1158 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1159 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1160 dsl_dataset_destroy_sync, &dsda, tag, 0);
1161 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1162 dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
1163 err = dsl_sync_task_group_wait(dstg);
1164 dsl_sync_task_group_destroy(dstg);
1165
1166 /*
1167 * We could be racing against 'zfs release' or 'zfs destroy -d'
1168 * on the origin snap, in which case we can get EBUSY if we
1169 * needed to destroy the origin snap but were not ready to
1170 * do so.
1171 */
1172 if (dsda.need_prep) {
1173 ASSERT(err == EBUSY);
1174 ASSERT(dsl_dir_is_clone(dd));
1175 ASSERT(dsda.rm_origin == NULL);
1176 }
1177 } while (dsda.need_prep);
1178
1179 if (dsda.rm_origin != NULL)
1180 dsl_dataset_disown(dsda.rm_origin, tag);
1181
1182 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1183 if (err)
1184 dsl_dir_close(dd, FTAG);
1185
1186 out_free:
1187 kmem_free(dummy_ds, sizeof (dsl_dataset_t));
1188 out:
1189 dsl_dataset_disown(ds, tag);
1190 return (err);
1191 }
1192
1193 blkptr_t *
1194 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1195 {
1196 return (&ds->ds_phys->ds_bp);
1197 }
1198
1199 void
1200 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1201 {
1202 ASSERT(dmu_tx_is_syncing(tx));
1203 /* If it's the meta-objset, set dp_meta_rootbp */
1204 if (ds == NULL) {
1205 tx->tx_pool->dp_meta_rootbp = *bp;
1206 } else {
1207 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1208 ds->ds_phys->ds_bp = *bp;
1209 }
1210 }
1211
1212 spa_t *
1213 dsl_dataset_get_spa(dsl_dataset_t *ds)
1214 {
1215 return (ds->ds_dir->dd_pool->dp_spa);
1216 }
1217
1218 void
1219 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1220 {
1221 dsl_pool_t *dp;
1222
1223 if (ds == NULL) /* this is the meta-objset */
1224 return;
1225
1226 ASSERT(ds->ds_objset != NULL);
1227
1228 if (ds->ds_phys->ds_next_snap_obj != 0)
1229 panic("dirtying snapshot!");
1230
1231 dp = ds->ds_dir->dd_pool;
1232
1233 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1234 /* up the hold count until we can be written out */
1235 dmu_buf_add_ref(ds->ds_dbuf, ds);
1236 }
1237 }
1238
1239 /*
1240 * The unique space in the head dataset can be calculated by subtracting
1241 * the space used in the most recent snapshot, that is still being used
1242 * in this file system, from the space currently in use. To figure out
1243 * the space in the most recent snapshot still in use, we need to take
1244 * the total space used in the snapshot and subtract out the space that
1245 * has been freed up since the snapshot was taken.
1246 */
1247 static void
1248 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1249 {
1250 uint64_t mrs_used;
1251 uint64_t dlused, dlcomp, dluncomp;
1252
1253 ASSERT(!dsl_dataset_is_snapshot(ds));
1254
1255 if (ds->ds_phys->ds_prev_snap_obj != 0)
1256 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1257 else
1258 mrs_used = 0;
1259
1260 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1261
1262 ASSERT3U(dlused, <=, mrs_used);
1263 ds->ds_phys->ds_unique_bytes =
1264 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1265
1266 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1267 SPA_VERSION_UNIQUE_ACCURATE)
1268 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1269 }
1270
1271 struct killarg {
1272 dsl_dataset_t *ds;
1273 dmu_tx_t *tx;
1274 };
1275
1276 /* ARGSUSED */
1277 static int
1278 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1279 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1280 {
1281 struct killarg *ka = arg;
1282 dmu_tx_t *tx = ka->tx;
1283
1284 if (bp == NULL)
1285 return (0);
1286
1287 if (zb->zb_level == ZB_ZIL_LEVEL) {
1288 ASSERT(zilog != NULL);
1289 /*
1290 * It's a block in the intent log. It has no
1291 * accounting, so just free it.
1292 */
1293 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1294 } else {
1295 ASSERT(zilog == NULL);
1296 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1297 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1298 }
1299
1300 return (0);
1301 }
1302
1303 /* ARGSUSED */
1304 static int
1305 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1306 {
1307 dsl_dataset_t *ds = arg1;
1308 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1309 uint64_t count;
1310 int err;
1311
1312 /*
1313 * Can't delete a head dataset if there are snapshots of it.
1314 * (Except if the only snapshots are from the branch we cloned
1315 * from.)
1316 */
1317 if (ds->ds_prev != NULL &&
1318 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1319 return (EBUSY);
1320
1321 /*
1322 * This is really a dsl_dir thing, but check it here so that
1323 * we'll be less likely to leave this dataset inconsistent &
1324 * nearly destroyed.
1325 */
1326 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1327 if (err)
1328 return (err);
1329 if (count != 0)
1330 return (EEXIST);
1331
1332 return (0);
1333 }
1334
1335 /* ARGSUSED */
1336 static void
1337 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1338 {
1339 dsl_dataset_t *ds = arg1;
1340 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1341
1342 /* Mark it as inconsistent on-disk, in case we crash */
1343 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1344 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1345
1346 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1347 "dataset = %llu", ds->ds_object);
1348 }
1349
1350 static int
1351 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1352 dmu_tx_t *tx)
1353 {
1354 dsl_dataset_t *ds = dsda->ds;
1355 dsl_dataset_t *ds_prev = ds->ds_prev;
1356
1357 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1358 struct dsl_ds_destroyarg ndsda = {0};
1359
1360 /*
1361 * If we're not prepared to remove the origin, don't remove
1362 * the clone either.
1363 */
1364 if (dsda->rm_origin == NULL) {
1365 dsda->need_prep = B_TRUE;
1366 return (EBUSY);
1367 }
1368
1369 ndsda.ds = ds_prev;
1370 ndsda.is_origin_rm = B_TRUE;
1371 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1372 }
1373
1374 /*
1375 * If we're not going to remove the origin after all,
1376 * undo the open context setup.
1377 */
1378 if (dsda->rm_origin != NULL) {
1379 dsl_dataset_disown(dsda->rm_origin, tag);
1380 dsda->rm_origin = NULL;
1381 }
1382
1383 return (0);
1384 }
1385
1386 /*
1387 * If you add new checks here, you may need to add
1388 * additional checks to the "temporary" case in
1389 * snapshot_check() in dmu_objset.c.
1390 */
1391 /* ARGSUSED */
1392 int
1393 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1394 {
1395 struct dsl_ds_destroyarg *dsda = arg1;
1396 dsl_dataset_t *ds = dsda->ds;
1397
1398 	/* we have an owner hold, so no one else can destroy us */
1399 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1400
1401 /*
1402 * Only allow deferred destroy on pools that support it.
1403 * NOTE: deferred destroy is only supported on snapshots.
1404 */
1405 if (dsda->defer) {
1406 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1407 SPA_VERSION_USERREFS)
1408 return (ENOTSUP);
1409 ASSERT(dsl_dataset_is_snapshot(ds));
1410 return (0);
1411 }
1412
1413 /*
1414 * Can't delete a head dataset if there are snapshots of it.
1415 * (Except if the only snapshots are from the branch we cloned
1416 * from.)
1417 */
1418 if (ds->ds_prev != NULL &&
1419 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1420 return (EBUSY);
1421
1422 /*
1423 	 * If we made changes this txg, traverse_dataset() won't find
1424 * them. Try again.
1425 */
1426 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1427 return (EAGAIN);
1428
1429 if (dsl_dataset_is_snapshot(ds)) {
1430 /*
1431 * If this snapshot has an elevated user reference count,
1432 * we can't destroy it yet.
1433 */
1434 if (ds->ds_userrefs > 0 && !dsda->releasing)
1435 return (EBUSY);
1436
1437 mutex_enter(&ds->ds_lock);
1438 /*
1439 * Can't delete a branch point. However, if we're destroying
1440 * a clone and removing its origin due to it having a user
1441 * hold count of 0 and having been marked for deferred destroy,
1442 * it's OK for the origin to have a single clone.
1443 */
1444 if (ds->ds_phys->ds_num_children >
1445 (dsda->is_origin_rm ? 2 : 1)) {
1446 mutex_exit(&ds->ds_lock);
1447 return (EEXIST);
1448 }
1449 mutex_exit(&ds->ds_lock);
1450 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1451 return (dsl_dataset_origin_check(dsda, arg2, tx));
1452 }
1453
1454 /* XXX we should do some i/o error checking... */
1455 return (0);
1456 }
1457
1458 struct refsarg {
1459 kmutex_t lock;
1460 boolean_t gone;
1461 kcondvar_t cv;
1462 };
1463
1464 /* ARGSUSED */
1465 static void
1466 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1467 {
1468 struct refsarg *arg = argv;
1469
1470 mutex_enter(&arg->lock);
1471 arg->gone = TRUE;
1472 cv_signal(&arg->cv);
1473 mutex_exit(&arg->lock);
1474 }
1475
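/*
 * Wait for every outstanding hold on this dataset to be released.  We
 * swap the dbuf's user-data eviction callback for one that signals a cv,
 * drop our own hold, and sleep until the dbuf is evicted -- i.e. until
 * the last reference is gone.
 */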
1476 static void
1477 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1478 {
1479 struct refsarg arg;
1480
1481 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1482 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1483 arg.gone = FALSE;
1484 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1485 dsl_dataset_refs_gone);
1486 dmu_buf_rele(ds->ds_dbuf, tag);
1487 mutex_enter(&arg.lock);
1488 while (!arg.gone)
1489 cv_wait(&arg.cv, &arg.lock);
1490 ASSERT(arg.gone);
1491 mutex_exit(&arg.lock);
1492 ds->ds_dbuf = NULL;
1493 ds->ds_phys = NULL;
1494 mutex_destroy(&arg.lock);
1495 cv_destroy(&arg.cv);
1496 }
1497
1498 static void
1499 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1500 {
1501 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1502 int err;
1503 ASSERTV(uint64_t count);
1504
1505 ASSERT(ds->ds_phys->ds_num_children >= 2);
1506 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1507 /*
1508 * The err should not be ENOENT, but a bug in a previous version
1509 * of the code could cause upgrade_clones_cb() to not set
1510 * ds_next_snap_obj when it should, leading to a missing entry.
1511 * If we knew that the pool was created after
1512 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1513 * ENOENT. However, at least we can check that we don't have
1514 * too many entries in the next_clones_obj even after failing to
1515 * remove this one.
1516 */
1517 if (err != ENOENT) {
1518 VERIFY3U(err, ==, 0);
1519 }
1520 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1521 &count));
1522 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1523 }
1524
1525 static void
1526 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1527 {
1528 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1529 zap_cursor_t zc;
1530 zap_attribute_t za;
1531
1532 /*
1533 * If it is the old version, dd_clones doesn't exist so we can't
1534 * find the clones, but deadlist_remove_key() is a no-op so it
1535 * doesn't matter.
1536 */
1537 if (ds->ds_dir->dd_phys->dd_clones == 0)
1538 return;
1539
1540 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1541 zap_cursor_retrieve(&zc, &za) == 0;
1542 zap_cursor_advance(&zc)) {
1543 dsl_dataset_t *clone;
1544
1545 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1546 za.za_first_integer, FTAG, &clone));
1547 if (clone->ds_dir->dd_origin_txg > mintxg) {
1548 dsl_deadlist_remove_key(&clone->ds_deadlist,
1549 mintxg, tx);
1550 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1551 }
1552 dsl_dataset_rele(clone, FTAG);
1553 }
1554 zap_cursor_fini(&zc);
1555 }
1556
1557 struct process_old_arg {
1558 dsl_dataset_t *ds;
1559 dsl_dataset_t *ds_prev;
1560 boolean_t after_branch_point;
1561 zio_t *pio;
1562 uint64_t used, comp, uncomp;
1563 };
1564
1565 static int
1566 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1567 {
1568 struct process_old_arg *poa = arg;
1569 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1570
1571 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1572 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1573 if (poa->ds_prev && !poa->after_branch_point &&
1574 bp->blk_birth >
1575 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1576 poa->ds_prev->ds_phys->ds_unique_bytes +=
1577 bp_get_dsize_sync(dp->dp_spa, bp);
1578 }
1579 } else {
1580 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1581 poa->comp += BP_GET_PSIZE(bp);
1582 poa->uncomp += BP_GET_UCSIZE(bp);
1583 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1584 }
1585 return (0);
1586 }
1587
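/*
 * Old-format (pre-deadlist-versioning) deadlists are flat bpobjs with no
 * per-snapshot sub-lists, so we cannot simply merge them.  Instead, walk
 * every entry in the next snapshot's deadlist: blocks born at or before
 * our previous snapshot move to our deadlist (and may become unique to
 * ds_prev), while anything younger was referenced only by us and ds_next,
 * so it is freed and charged off of snapused.  Finally the two deadlist
 * objects are swapped so that ds_next keeps the combined list and the
 * caller can free ours.
 */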
1588 static void
1589 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1590 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1591 {
1592 struct process_old_arg poa = { 0 };
1593 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1594 objset_t *mos = dp->dp_meta_objset;
1595
1596 ASSERT(ds->ds_deadlist.dl_oldfmt);
1597 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1598
1599 poa.ds = ds;
1600 poa.ds_prev = ds_prev;
1601 poa.after_branch_point = after_branch_point;
1602 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1603 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1604 process_old_cb, &poa, tx));
1605 VERIFY3U(zio_wait(poa.pio), ==, 0);
1606 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1607
1608 /* change snapused */
1609 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1610 -poa.used, -poa.comp, -poa.uncomp, tx);
1611
1612 /* swap next's deadlist to our deadlist */
1613 dsl_deadlist_close(&ds->ds_deadlist);
1614 dsl_deadlist_close(&ds_next->ds_deadlist);
1615 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1616 ds->ds_phys->ds_deadlist_obj);
1617 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1618 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1619 ds_next->ds_phys->ds_deadlist_obj);
1620 }
1621
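/*
 * Perform the destroy in syncing context.  For a deferred destroy that
 * cannot proceed yet, just set DS_FLAG_DEFER_DESTROY.  Otherwise: mark
 * the dataset with the reaper, release its refreservation, splice it out
 * of the prev/next snapshot chain, migrate or free its deadlist blocks,
 * fix up clone bookkeeping, remove it from the snapshot namespace, and
 * finally free the MOS object (recursing onto a deferred-destroy origin
 * if we were the last clone keeping it alive).
 */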
1622 void
1623 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1624 {
1625 struct dsl_ds_destroyarg *dsda = arg1;
1626 dsl_dataset_t *ds = dsda->ds;
1627 int err;
1628 int after_branch_point = FALSE;
1629 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1630 objset_t *mos = dp->dp_meta_objset;
1631 dsl_dataset_t *ds_prev = NULL;
1632 boolean_t wont_destroy;
1633 uint64_t obj;
1634
1635 wont_destroy = (dsda->defer &&
1636 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1637
1638 ASSERT(ds->ds_owner || wont_destroy);
1639 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1640 ASSERT(ds->ds_prev == NULL ||
1641 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1642 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1643
1644 if (wont_destroy) {
1645 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1646 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1647 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1648 return;
1649 }
1650
1651 /* signal any waiters that this dataset is going away */
1652 mutex_enter(&ds->ds_lock);
1653 ds->ds_owner = dsl_reaper;
1654 cv_broadcast(&ds->ds_exclusive_cv);
1655 mutex_exit(&ds->ds_lock);
1656
1657 /* Remove our reservation */
1658 if (ds->ds_reserved != 0) {
1659 dsl_prop_setarg_t psa;
1660 uint64_t value = 0;
1661
1662 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1663 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1664 &value);
1665 psa.psa_effective_value = 0; /* predict default value */
1666
1667 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1668 ASSERT3U(ds->ds_reserved, ==, 0);
1669 }
1670
1671 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1672
1673 dsl_scan_ds_destroyed(ds, tx);
1674
1675 obj = ds->ds_object;
1676
1677 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1678 if (ds->ds_prev) {
1679 ds_prev = ds->ds_prev;
1680 } else {
1681 VERIFY(0 == dsl_dataset_hold_obj(dp,
1682 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1683 }
1684 after_branch_point =
1685 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1686
1687 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1688 if (after_branch_point &&
1689 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1690 remove_from_next_clones(ds_prev, obj, tx);
1691 if (ds->ds_phys->ds_next_snap_obj != 0) {
1692 VERIFY(0 == zap_add_int(mos,
1693 ds_prev->ds_phys->ds_next_clones_obj,
1694 ds->ds_phys->ds_next_snap_obj, tx));
1695 }
1696 }
1697 if (after_branch_point &&
1698 ds->ds_phys->ds_next_snap_obj == 0) {
1699 /* This clone is toast. */
1700 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1701 ds_prev->ds_phys->ds_num_children--;
1702
1703 /*
1704 * If the clone's origin has no other clones, no
1705 * user holds, and has been marked for deferred
1706 * deletion, then we should have done the necessary
1707 * destroy setup for it.
1708 */
1709 if (ds_prev->ds_phys->ds_num_children == 1 &&
1710 ds_prev->ds_userrefs == 0 &&
1711 DS_IS_DEFER_DESTROY(ds_prev)) {
1712 ASSERT3P(dsda->rm_origin, !=, NULL);
1713 } else {
1714 ASSERT3P(dsda->rm_origin, ==, NULL);
1715 }
1716 } else if (!after_branch_point) {
1717 ds_prev->ds_phys->ds_next_snap_obj =
1718 ds->ds_phys->ds_next_snap_obj;
1719 }
1720 }
1721
1722 if (dsl_dataset_is_snapshot(ds)) {
1723 dsl_dataset_t *ds_next;
1724 uint64_t old_unique;
1725 uint64_t used = 0, comp = 0, uncomp = 0;
1726
1727 VERIFY(0 == dsl_dataset_hold_obj(dp,
1728 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1729 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1730
1731 old_unique = ds_next->ds_phys->ds_unique_bytes;
1732
1733 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1734 ds_next->ds_phys->ds_prev_snap_obj =
1735 ds->ds_phys->ds_prev_snap_obj;
1736 ds_next->ds_phys->ds_prev_snap_txg =
1737 ds->ds_phys->ds_prev_snap_txg;
1738 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1739 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1740
1741
1742 if (ds_next->ds_deadlist.dl_oldfmt) {
1743 process_old_deadlist(ds, ds_prev, ds_next,
1744 after_branch_point, tx);
1745 } else {
1746 /* Adjust prev's unique space. */
1747 if (ds_prev && !after_branch_point) {
1748 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1749 ds_prev->ds_phys->ds_prev_snap_txg,
1750 ds->ds_phys->ds_prev_snap_txg,
1751 &used, &comp, &uncomp);
1752 ds_prev->ds_phys->ds_unique_bytes += used;
1753 }
1754
1755 /* Adjust snapused. */
1756 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1757 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1758 &used, &comp, &uncomp);
1759 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1760 -used, -comp, -uncomp, tx);
1761
1762 /* Move blocks to be freed to pool's free list. */
1763 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1764 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1765 tx);
1766 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1767 DD_USED_HEAD, used, comp, uncomp, tx);
1768 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1769
1770 /* Merge our deadlist into next's and free it. */
1771 dsl_deadlist_merge(&ds_next->ds_deadlist,
1772 ds->ds_phys->ds_deadlist_obj, tx);
1773 }
1774 dsl_deadlist_close(&ds->ds_deadlist);
1775 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1776
1777 /* Collapse range in clone heads */
1778 dsl_dataset_remove_clones_key(ds,
1779 ds->ds_phys->ds_creation_txg, tx);
1780
1781 if (dsl_dataset_is_snapshot(ds_next)) {
1782 dsl_dataset_t *ds_nextnext;
1783 dsl_dataset_t *hds;
1784
1785 /*
1786 * Update next's unique to include blocks which
1787 * were previously shared by only this snapshot
1788 * and it. Those blocks will be born after the
1789 * prev snap and before this snap, and will have
1790 * died after the next snap and before the one
1791 			 * after that (i.e. be on the snap after next's
1792 * deadlist).
1793 */
1794 VERIFY(0 == dsl_dataset_hold_obj(dp,
1795 ds_next->ds_phys->ds_next_snap_obj,
1796 FTAG, &ds_nextnext));
1797 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1798 ds->ds_phys->ds_prev_snap_txg,
1799 ds->ds_phys->ds_creation_txg,
1800 &used, &comp, &uncomp);
1801 ds_next->ds_phys->ds_unique_bytes += used;
1802 dsl_dataset_rele(ds_nextnext, FTAG);
1803 ASSERT3P(ds_next->ds_prev, ==, NULL);
1804
1805 /* Collapse range in this head. */
1806 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1807 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1808 FTAG, &hds));
1809 dsl_deadlist_remove_key(&hds->ds_deadlist,
1810 ds->ds_phys->ds_creation_txg, tx);
1811 dsl_dataset_rele(hds, FTAG);
1812
1813 } else {
1814 ASSERT3P(ds_next->ds_prev, ==, ds);
1815 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1816 ds_next->ds_prev = NULL;
1817 if (ds_prev) {
1818 VERIFY(0 == dsl_dataset_get_ref(dp,
1819 ds->ds_phys->ds_prev_snap_obj,
1820 ds_next, &ds_next->ds_prev));
1821 }
1822
1823 dsl_dataset_recalc_head_uniq(ds_next);
1824
1825 /*
1826 			 * Reduce the amount of our unconsumed refreservation
1827 * being charged to our parent by the amount of
1828 * new unique data we have gained.
1829 */
1830 if (old_unique < ds_next->ds_reserved) {
1831 int64_t mrsdelta;
1832 uint64_t new_unique =
1833 ds_next->ds_phys->ds_unique_bytes;
1834
1835 ASSERT(old_unique <= new_unique);
1836 mrsdelta = MIN(new_unique - old_unique,
1837 ds_next->ds_reserved - old_unique);
1838 dsl_dir_diduse_space(ds->ds_dir,
1839 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1840 }
1841 }
1842 dsl_dataset_rele(ds_next, FTAG);
1843 } else {
1844 /*
1845 * There's no next snapshot, so this is a head dataset.
1846 * Destroy the deadlist. Unless it's a clone, the
1847 * deadlist should be empty. (If it's a clone, it's
1848 * safe to ignore the deadlist contents.)
1849 */
1850 struct killarg ka;
1851
1852 dsl_deadlist_close(&ds->ds_deadlist);
1853 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1854 ds->ds_phys->ds_deadlist_obj = 0;
1855
1856 /*
1857 * Free everything that we point to (that's born after
1858 * the previous snapshot, if we are a clone)
1859 *
1860 * NB: this should be very quick, because we already
1861 * freed all the objects in open context.
1862 */
1863 ka.ds = ds;
1864 ka.tx = tx;
1865 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1866 TRAVERSE_POST, kill_blkptr, &ka);
1867 ASSERT3U(err, ==, 0);
1868 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1869 ds->ds_phys->ds_unique_bytes == 0);
1870
1871 if (ds->ds_prev != NULL) {
1872 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1873 VERIFY3U(0, ==, zap_remove_int(mos,
1874 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1875 ds->ds_object, tx));
1876 }
1877 dsl_dataset_rele(ds->ds_prev, ds);
1878 ds->ds_prev = ds_prev = NULL;
1879 }
1880 }
1881
1882 /*
1883 	 * This must be done after the traverse_dataset(), because it will
1884 * re-open the objset.
1885 */
1886 if (ds->ds_objset) {
1887 dmu_objset_evict(ds->ds_objset);
1888 ds->ds_objset = NULL;
1889 }
1890
1891 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1892 /* Erase the link in the dir */
1893 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1894 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1895 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1896 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1897 ASSERT(err == 0);
1898 } else {
1899 /* remove from snapshot namespace */
1900 dsl_dataset_t *ds_head;
1901 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1902 VERIFY(0 == dsl_dataset_hold_obj(dp,
1903 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1904 VERIFY(0 == dsl_dataset_get_snapname(ds));
1905 #ifdef ZFS_DEBUG
1906 {
1907 uint64_t val;
1908
1909 err = dsl_dataset_snap_lookup(ds_head,
1910 ds->ds_snapname, &val);
1911 ASSERT3U(err, ==, 0);
1912 ASSERT3U(val, ==, obj);
1913 }
1914 #endif
1915 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1916 ASSERT(err == 0);
1917 dsl_dataset_rele(ds_head, FTAG);
1918 }
1919
1920 if (ds_prev && ds->ds_prev != ds_prev)
1921 dsl_dataset_rele(ds_prev, FTAG);
1922
1923 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1924 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1925 "dataset = %llu", ds->ds_object);
1926
1927 if (ds->ds_phys->ds_next_clones_obj != 0) {
1928 ASSERTV(uint64_t count);
1929 ASSERT(0 == zap_count(mos,
1930 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1931 VERIFY(0 == dmu_object_free(mos,
1932 ds->ds_phys->ds_next_clones_obj, tx));
1933 }
1934 if (ds->ds_phys->ds_props_obj != 0)
1935 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1936 if (ds->ds_phys->ds_userrefs_obj != 0)
1937 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1938 dsl_dir_close(ds->ds_dir, ds);
1939 ds->ds_dir = NULL;
1940 dsl_dataset_drain_refs(ds, tag);
1941 VERIFY(0 == dmu_object_free(mos, obj, tx));
1942
1943 if (dsda->rm_origin) {
1944 /*
1945 * Remove the origin of the clone we just destroyed.
1946 */
1947 struct dsl_ds_destroyarg ndsda = {0};
1948
1949 ndsda.ds = dsda->rm_origin;
1950 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1951 }
1952 }
1953
1954 static int
1955 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1956 {
1957 uint64_t asize;
1958
1959 if (!dmu_tx_is_syncing(tx))
1960 return (0);
1961
1962 /*
1963 * If there's an fs-only reservation, any blocks that might become
1964 * owned by the snapshot dataset must be accommodated by space
1965 * outside of the reservation.
1966 */
1967 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1968 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1969 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1970 return (ENOSPC);
1971
1972 /*
1973 * Propagate any reserved space for this snapshot to other
1974 * snapshot checks in this sync group.
1975 */
1976 if (asize > 0)
1977 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1978
1979 return (0);
1980 }
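/*
 * Worked example for the check above (illustrative numbers only):
 * with refreservation = 10G and ds_unique_bytes = 3G, taking a
 * snapshot turns those 3G of unique data into snapshot data that
 * the refreservation no longer covers, so asize = MIN(3G, 10G) = 3G
 * must be available outside the reservation or we return ENOSPC.
 */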
1981
1982 int
1983 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1984 {
1985 dsl_dataset_t *ds = arg1;
1986 const char *snapname = arg2;
1987 int err;
1988 uint64_t value;
1989
1990 /*
1991 * We don't allow multiple snapshots of the same txg. If there
1992 * is already one, try again.
1993 */
1994 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1995 return (EAGAIN);
1996
1997 /*
1998 * Check for a conflicting snapshot name.
1999 */
2000 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2001 if (err == 0)
2002 return (EEXIST);
2003 if (err != ENOENT)
2004 return (err);
2005
2006 /*
2007 * Check that the resulting snapshot name is not too long:
2008 * dataset name length + 1 for the @-sign + snapshot name length.
2009 */
2010 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2011 return (ENAMETOOLONG);
2012
2013 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2014 if (err)
2015 return (err);
2016
2017 ds->ds_trysnap_txg = tx->tx_txg;
2018 return (0);
2019 }
2020
2021 void
2022 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2023 {
2024 dsl_dataset_t *ds = arg1;
2025 const char *snapname = arg2;
2026 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2027 dmu_buf_t *dbuf;
2028 dsl_dataset_phys_t *dsphys;
2029 uint64_t dsobj, crtxg;
2030 objset_t *mos = dp->dp_meta_objset;
2031 int err;
2032
2033 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2034
2035 /*
2036 * The origin's ds_creation_txg has to be < TXG_INITIAL
2037 */
2038 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2039 crtxg = 1;
2040 else
2041 crtxg = tx->tx_txg;
2042
2043 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2044 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2045 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2046 dmu_buf_will_dirty(dbuf, tx);
2047 dsphys = dbuf->db_data;
2048 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2049 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2050 dsphys->ds_fsid_guid = unique_create();
2051 (void) random_get_pseudo_bytes((void *)&dsphys->ds_guid,
2052 sizeof (dsphys->ds_guid));
2053 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2054 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2055 dsphys->ds_next_snap_obj = ds->ds_object;
2056 dsphys->ds_num_children = 1;
2057 dsphys->ds_creation_time = gethrestime_sec();
2058 dsphys->ds_creation_txg = crtxg;
2059 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2060 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2061 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2062 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2063 dsphys->ds_flags = ds->ds_phys->ds_flags;
2064 dsphys->ds_bp = ds->ds_phys->ds_bp;
2065 dmu_buf_rele(dbuf, FTAG);
2066
2067 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2068 if (ds->ds_prev) {
2069 uint64_t next_clones_obj =
2070 ds->ds_prev->ds_phys->ds_next_clones_obj;
2071 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2072 ds->ds_object ||
2073 ds->ds_prev->ds_phys->ds_num_children > 1);
2074 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2075 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2076 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2077 ds->ds_prev->ds_phys->ds_creation_txg);
2078 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2079 } else if (next_clones_obj != 0) {
2080 remove_from_next_clones(ds->ds_prev,
2081 dsphys->ds_next_snap_obj, tx);
2082 VERIFY3U(0, ==, zap_add_int(mos,
2083 next_clones_obj, dsobj, tx));
2084 }
2085 }
2086
2087 /*
2088 * If we have a reference-reservation on this dataset, we will
2089 * need to increase the amount of refreservation being charged
2090 * since our unique space is going to zero.
2091 */
2092 if (ds->ds_reserved) {
2093 int64_t delta;
2094 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2095 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2096 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2097 delta, 0, 0, tx);
2098 }
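/*
 * E.g. (illustrative numbers): unique = 5G, refreservation = 8G.
 * The snapshot resets unique to 0, so the previously "consumed"
 * 5G of the reservation must now be charged to the parent:
 * delta = MIN(5G, 8G) = 5G added to DD_USED_REFRSRV above.
 */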
2099
2100 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2101 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2102 ds->ds_dir->dd_myname, snapname, dsobj,
2103 ds->ds_phys->ds_prev_snap_txg);
2104 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2105 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2106 dsl_deadlist_close(&ds->ds_deadlist);
2107 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2108 dsl_deadlist_add_key(&ds->ds_deadlist,
2109 ds->ds_phys->ds_prev_snap_txg, tx);
2110
2111 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2112 ds->ds_phys->ds_prev_snap_obj = dsobj;
2113 ds->ds_phys->ds_prev_snap_txg = crtxg;
2114 ds->ds_phys->ds_unique_bytes = 0;
2115 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2116 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2117
2118 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2119 snapname, 8, 1, &dsobj, tx);
2120 ASSERT(err == 0);
2121
2122 if (ds->ds_prev)
2123 dsl_dataset_drop_ref(ds->ds_prev, ds);
2124 VERIFY(0 == dsl_dataset_get_ref(dp,
2125 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2126
2127 dsl_scan_ds_snapshotted(ds, tx);
2128
2129 dsl_dir_snap_cmtime_update(ds->ds_dir);
2130
2131 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2132 "dataset = %llu", dsobj);
2133 }
2134
2135 void
2136 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2137 {
2138 ASSERT(dmu_tx_is_syncing(tx));
2139 ASSERT(ds->ds_objset != NULL);
2140 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2141
2142 /*
2143 * in case we had to change ds_fsid_guid when we opened it,
2144 * sync it out now.
2145 */
2146 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2147 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2148
2149 dsl_dir_dirty(ds->ds_dir, tx);
2150 dmu_objset_sync(ds->ds_objset, zio, tx);
2151 }
2152
2153 void
2154 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2155 {
2156 uint64_t refd, avail, uobjs, aobjs;
2157
2158 dsl_dir_stats(ds->ds_dir, nv);
2159
2160 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2161 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2162 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2163
2164 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2165 ds->ds_phys->ds_creation_time);
2166 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2167 ds->ds_phys->ds_creation_txg);
2168 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2169 ds->ds_quota);
2170 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2171 ds->ds_reserved);
2172 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2173 ds->ds_phys->ds_guid);
2174 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2175 ds->ds_phys->ds_unique_bytes);
2176 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2177 ds->ds_object);
2178 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2179 ds->ds_userrefs);
2180 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2181 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2182
2183 if (ds->ds_phys->ds_next_snap_obj) {
2184 /*
2185 * This is a snapshot; override the dd's space used with
2186 * our unique space and compression ratio.
2187 */
2188 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2189 ds->ds_phys->ds_unique_bytes);
2190 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
2191 ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2192 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2193 ds->ds_phys->ds_compressed_bytes));
2194 }
2195 }
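/*
 * Note the compressratio above is a percentage: e.g. (illustrative)
 * compressed = 1G and uncompressed = 2G yields 2G * 100 / 1G = 200,
 * i.e. a 2.00x ratio.
 */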
2196
2197 void
2198 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2199 {
2200 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2201 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2202 stat->dds_guid = ds->ds_phys->ds_guid;
2203 if (ds->ds_phys->ds_next_snap_obj) {
2204 stat->dds_is_snapshot = B_TRUE;
2205 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2206 } else {
2207 stat->dds_is_snapshot = B_FALSE;
2208 stat->dds_num_clones = 0;
2209 }
2210
2211 /* clone origin is really a dsl_dir thing... */
2212 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2213 if (dsl_dir_is_clone(ds->ds_dir)) {
2214 dsl_dataset_t *ods;
2215
2216 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2217 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2218 dsl_dataset_name(ods, stat->dds_origin);
2219 dsl_dataset_drop_ref(ods, FTAG);
2220 } else {
2221 stat->dds_origin[0] = '\0';
2222 }
2223 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2224 }
2225
2226 uint64_t
2227 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2228 {
2229 return (ds->ds_fsid_guid);
2230 }
2231
2232 void
2233 dsl_dataset_space(dsl_dataset_t *ds,
2234 uint64_t *refdbytesp, uint64_t *availbytesp,
2235 uint64_t *usedobjsp, uint64_t *availobjsp)
2236 {
2237 *refdbytesp = ds->ds_phys->ds_used_bytes;
2238 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2239 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2240 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2241 if (ds->ds_quota != 0) {
2242 /*
2243 * Adjust available bytes according to refquota
2244 */
2245 if (*refdbytesp < ds->ds_quota)
2246 *availbytesp = MIN(*availbytesp,
2247 ds->ds_quota - *refdbytesp);
2248 else
2249 *availbytesp = 0;
2250 }
2251 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2252 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2253 }
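/*
 * Refquota example for the adjustment above (illustrative numbers):
 * with refquota = 10G and referenced = 8G, at most 2G more may be
 * referenced, so avail = MIN(avail, 2G); once referenced reaches the
 * quota, avail is reported as 0 even if the pool has space.
 */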
2254
2255 boolean_t
2256 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2257 {
2258 ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
2259
2260 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2261 dsl_pool_sync_context(dp));
2262 if (ds->ds_prev == NULL)
2263 return (B_FALSE);
2264 if (ds->ds_phys->ds_bp.blk_birth >
2265 ds->ds_prev->ds_phys->ds_creation_txg) {
2266 objset_t *os, *os_prev;
2267 /*
2268 * It may be that only the ZIL differs, because it was
2269 * reset in the head. Don't count that as being
2270 * modified.
2271 */
2272 if (dmu_objset_from_ds(ds, &os) != 0)
2273 return (B_TRUE);
2274 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2275 return (B_TRUE);
2276 return (bcmp(&os->os_phys->os_meta_dnode,
2277 &os_prev->os_phys->os_meta_dnode,
2278 sizeof (os->os_phys->os_meta_dnode)) != 0);
2279 }
2280 return (B_FALSE);
2281 }
2282
2283 /* ARGSUSED */
2284 static int
2285 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2286 {
2287 dsl_dataset_t *ds = arg1;
2288 char *newsnapname = arg2;
2289 dsl_dir_t *dd = ds->ds_dir;
2290 dsl_dataset_t *hds;
2291 uint64_t val;
2292 int err;
2293
2294 err = dsl_dataset_hold_obj(dd->dd_pool,
2295 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2296 if (err)
2297 return (err);
2298
2299 /* new name better not be in use */
2300 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2301 dsl_dataset_rele(hds, FTAG);
2302
2303 if (err == 0)
2304 err = EEXIST;
2305 else if (err == ENOENT)
2306 err = 0;
2307
2308 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2309 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2310 err = ENAMETOOLONG;
2311
2312 return (err);
2313 }
2314
2315 static void
2316 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2317 {
2318 dsl_dataset_t *ds = arg1;
2319 const char *newsnapname = arg2;
2320 dsl_dir_t *dd = ds->ds_dir;
2321 objset_t *mos = dd->dd_pool->dp_meta_objset;
2322 dsl_dataset_t *hds;
2323 int err;
2324
2325 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2326
2327 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2328 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2329
2330 VERIFY(0 == dsl_dataset_get_snapname(ds));
2331 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2332 ASSERT3U(err, ==, 0);
2333 mutex_enter(&ds->ds_lock);
2334 (void) strcpy(ds->ds_snapname, newsnapname);
2335 mutex_exit(&ds->ds_lock);
2336 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2337 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2338 ASSERT3U(err, ==, 0);
2339
2340 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2341 "dataset = %llu", ds->ds_object);
2342 dsl_dataset_rele(hds, FTAG);
2343 }
2344
2345 struct renamesnaparg {
2346 dsl_sync_task_group_t *dstg;
2347 char failed[MAXPATHLEN];
2348 char *oldsnap;
2349 char *newsnap;
2350 };
2351
2352 static int
2353 dsl_snapshot_rename_one(const char *name, void *arg)
2354 {
2355 struct renamesnaparg *ra = arg;
2356 dsl_dataset_t *ds = NULL;
2357 char *snapname;
2358 int err;
2359
2360 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2361 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2362
2363 /*
2364 * For recursive snapshot renames the parent won't be changing,
2365 * so we just pass name for both the to and from arguments.
2366 */
2367 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2368 if (err != 0) {
2369 strfree(snapname);
2370 return (err == ENOENT ? 0 : err);
2371 }
2372
2373 #ifdef _KERNEL
2374 /*
2375 * Every snapshot being renamed must first be unmounted.
2376 */
2377 (void) zfs_unmount_snap(snapname, NULL);
2378 #endif
2379 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2380 strfree(snapname);
2381 if (err != 0)
2382 return (err == ENOENT ? 0 : err);
2383
2384 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2385 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2386
2387 return (0);
2388 }
2389
2390 static int
2391 dsl_recursive_rename(char *oldname, const char *newname)
2392 {
2393 int err;
2394 struct renamesnaparg *ra;
2395 dsl_sync_task_t *dst;
2396 spa_t *spa;
2397 char *cp, *fsname = spa_strdup(oldname);
2398 int len = strlen(oldname) + 1;
2399
2400 /* truncate the snapshot name to get the fsname */
2401 cp = strchr(fsname, '@');
2402 *cp = '\0';
2403
2404 err = spa_open(fsname, &spa, FTAG);
2405 if (err) {
2406 kmem_free(fsname, len);
2407 return (err);
2408 }
2409 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2410 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2411
2412 ra->oldsnap = strchr(oldname, '@') + 1;
2413 ra->newsnap = strchr(newname, '@') + 1;
2414 *ra->failed = '\0';
2415
2416 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2417 DS_FIND_CHILDREN);
2418 kmem_free(fsname, len);
2419
2420 if (err == 0) {
2421 err = dsl_sync_task_group_wait(ra->dstg);
2422 }
2423
2424 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2425 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2426 dsl_dataset_t *ds = dst->dst_arg1;
2427 if (dst->dst_err) {
2428 dsl_dir_name(ds->ds_dir, ra->failed);
2429 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2430 (void) strlcat(ra->failed, ra->newsnap,
2431 sizeof (ra->failed));
2432 }
2433 dsl_dataset_rele(ds, ra->dstg);
2434 }
2435
2436 if (err)
2437 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2438
2439 dsl_sync_task_group_destroy(ra->dstg);
2440 kmem_free(ra, sizeof (struct renamesnaparg));
2441 spa_close(spa, FTAG);
2442 return (err);
2443 }
2444
2445 static int
2446 dsl_valid_rename(const char *oldname, void *arg)
2447 {
2448 int delta = *(int *)arg;
2449
2450 if (strlen(oldname) + delta >= MAXNAMELEN)
2451 return (ENAMETOOLONG);
2452
2453 return (0);
2454 }
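/*
 * E.g. (illustrative): renaming tank/a to tank/abcd makes delta = 3,
 * and every descendant name must still fit in MAXNAMELEN after
 * growing by those 3 characters, else the rename fails up front
 * with ENAMETOOLONG.
 */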
2455
2456 #pragma weak dmu_objset_rename = dsl_dataset_rename
2457 int
2458 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2459 {
2460 dsl_dir_t *dd;
2461 dsl_dataset_t *ds;
2462 const char *tail;
2463 int err;
2464
2465 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2466 if (err)
2467 return (err);
2468
2469 if (tail == NULL) {
2470 int delta = strlen(newname) - strlen(oldname);
2471
2472 /* if we're growing, validate child name lengths */
2473 if (delta > 0)
2474 err = dmu_objset_find(oldname, dsl_valid_rename,
2475 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2476
2477 if (err == 0)
2478 err = dsl_dir_rename(dd, newname);
2479 dsl_dir_close(dd, FTAG);
2480 return (err);
2481 }
2482
2483 if (tail[0] != '@') {
2484 /* the name ended in a nonexistent component */
2485 dsl_dir_close(dd, FTAG);
2486 return (ENOENT);
2487 }
2488
2489 dsl_dir_close(dd, FTAG);
2490
2491 /* new name must be a snapshot in the same filesystem */
2492 tail = strchr(newname, '@');
2493 if (tail == NULL)
2494 return (EINVAL);
2495 tail++;
2496 if (strncmp(oldname, newname, tail - newname) != 0)
2497 return (EXDEV);
2498
2499 if (recursive) {
2500 err = dsl_recursive_rename(oldname, newname);
2501 } else {
2502 err = dsl_dataset_hold(oldname, FTAG, &ds);
2503 if (err)
2504 return (err);
2505
2506 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2507 dsl_dataset_snapshot_rename_check,
2508 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2509
2510 dsl_dataset_rele(ds, FTAG);
2511 }
2512
2513 return (err);
2514 }
2515
2516 struct promotenode {
2517 list_node_t link;
2518 dsl_dataset_t *ds;
2519 };
2520
2521 struct promotearg {
2522 list_t shared_snaps, origin_snaps, clone_snaps;
2523 dsl_dataset_t *origin_origin;
2524 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2525 char *err_ds;
2526 };
2527
2528 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2529
2530 static int
2531 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2532 {
2533 dsl_dataset_t *hds = arg1;
2534 struct promotearg *pa = arg2;
2535 struct promotenode *snap = list_head(&pa->shared_snaps);
2536 dsl_dataset_t *origin_ds = snap->ds;
2537 int err;
2538 uint64_t unused;
2539
2540 /* Check that it is a real clone */
2541 if (!dsl_dir_is_clone(hds->ds_dir))
2542 return (EINVAL);
2543
2544 /* Since this is so expensive, don't do the preliminary check */
2545 if (!dmu_tx_is_syncing(tx))
2546 return (0);
2547
2548 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2549 return (EXDEV);
2550
2551 /* compute origin's new unique space */
2552 snap = list_tail(&pa->clone_snaps);
2553 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2554 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2555 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2556 &pa->unique, &unused, &unused);
2557
2558 /*
2559 * Walk the snapshots that we are moving
2560 *
2561 * Compute space to transfer. Consider the incremental changes
2562 * to used for each snapshot:
2563 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2564 * So each snapshot gave birth to:
2565 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2566 * So a sequence would look like:
2567 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2568 * Which simplifies to:
2569 * uN + kN + k(N-1) + ... + k1 + k0
2570 * Note however, if we stop before we reach the ORIGIN we get:
2571 * uN + kN + k(N-1) + ... + kM - u(M-1)
2572 */
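/*
 * Illustrative instance of the sums above (made-up numbers): three
 * snapshots with u0=1G,k0=0; u1=3G,k1=1G; u2=2G,k2=4G. Blocks born
 * total (2-3+4) + (3-1+1) + (1-0+0) = 7G, which matches
 * u2 + k2 + k1 + k0 = 2+4+1+0 = 7G.
 */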
2573 pa->used = origin_ds->ds_phys->ds_used_bytes;
2574 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2575 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2576 for (snap = list_head(&pa->shared_snaps); snap;
2577 snap = list_next(&pa->shared_snaps, snap)) {
2578 uint64_t val, dlused, dlcomp, dluncomp;
2579 dsl_dataset_t *ds = snap->ds;
2580
2581 /* Check that the snapshot name does not conflict */
2582 VERIFY(0 == dsl_dataset_get_snapname(ds));
2583 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2584 if (err == 0) {
2585 err = EEXIST;
2586 goto out;
2587 }
2588 if (err != ENOENT)
2589 goto out;
2590
2591 /* The very first snapshot does not have a deadlist */
2592 if (ds->ds_phys->ds_prev_snap_obj == 0)
2593 continue;
2594
2595 dsl_deadlist_space(&ds->ds_deadlist,
2596 &dlused, &dlcomp, &dluncomp);
2597 pa->used += dlused;
2598 pa->comp += dlcomp;
2599 pa->uncomp += dluncomp;
2600 }
2601
2602 /*
2603 * If we are a clone of a clone then we never reached ORIGIN,
2604 * so we need to subtract out the clone origin's used space.
2605 */
2606 if (pa->origin_origin) {
2607 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2608 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2609 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2610 }
2611
2612 /* Check that there is enough space here */
2613 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2614 pa->used);
2615 if (err)
2616 return (err);
2617
2618 /*
2619 * Compute the amounts of space that will be used by snapshots
2620 * after the promotion (for both origin and clone). For each,
2621 * it is the amount of space that will be on all of their
2622 * deadlists (that was not born before their new origin).
2623 */
2624 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2625 uint64_t space;
2626
2627 /*
2628 * Note, typically this will not be a clone of a clone,
2629 * so dd_origin_txg will be < TXG_INITIAL, so
2630 * these snaplist_space() -> dsl_deadlist_space_range()
2631 * calls will be fast because they do not have to
2632 * iterate over all bps.
2633 */
2634 snap = list_head(&pa->origin_snaps);
2635 err = snaplist_space(&pa->shared_snaps,
2636 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2637 if (err)
2638 return (err);
2639
2640 err = snaplist_space(&pa->clone_snaps,
2641 snap->ds->ds_dir->dd_origin_txg, &space);
2642 if (err)
2643 return (err);
2644 pa->cloneusedsnap += space;
2645 }
2646 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2647 err = snaplist_space(&pa->origin_snaps,
2648 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2649 if (err)
2650 return (err);
2651 }
2652
2653 return (0);
2654 out:
2655 pa->err_ds = snap->ds->ds_snapname;
2656 return (err);
2657 }
2658
2659 static void
2660 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2661 {
2662 dsl_dataset_t *hds = arg1;
2663 struct promotearg *pa = arg2;
2664 struct promotenode *snap = list_head(&pa->shared_snaps);
2665 dsl_dataset_t *origin_ds = snap->ds;
2666 dsl_dataset_t *origin_head;
2667 dsl_dir_t *dd = hds->ds_dir;
2668 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2669 dsl_dir_t *odd = NULL;
2670 uint64_t oldnext_obj;
2671 int64_t delta;
2672
2673 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2674
2675 snap = list_head(&pa->origin_snaps);
2676 origin_head = snap->ds;
2677
2678 /*
2679 * We need to explicitly open odd, since origin_ds's dd will be
2680 * changing.
2681 */
2682 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2683 NULL, FTAG, &odd));
2684
2685 /* change origin's next snap */
2686 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2687 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2688 snap = list_tail(&pa->clone_snaps);
2689 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2690 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2691
2692 /* change the origin's next clone */
2693 if (origin_ds->ds_phys->ds_next_clones_obj) {
2694 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2695 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2696 origin_ds->ds_phys->ds_next_clones_obj,
2697 oldnext_obj, tx));
2698 }
2699
2700 /* change origin */
2701 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2702 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2703 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2704 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2705 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2706 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2707 origin_head->ds_dir->dd_origin_txg =
2708 origin_ds->ds_phys->ds_creation_txg;
2709
2710 /* change dd_clone entries */
2711 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2712 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2713 odd->dd_phys->dd_clones, hds->ds_object, tx));
2714 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2715 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2716 hds->ds_object, tx));
2717
2718 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2719 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2720 origin_head->ds_object, tx));
2721 if (dd->dd_phys->dd_clones == 0) {
2722 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2723 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2724 }
2725 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2726 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2727
2728 }
2729
2730 /* move snapshots to this dir */
2731 for (snap = list_head(&pa->shared_snaps); snap;
2732 snap = list_next(&pa->shared_snaps, snap)) {
2733 dsl_dataset_t *ds = snap->ds;
2734
2735 /* unregister props as dsl_dir is changing */
2736 if (ds->ds_objset) {
2737 dmu_objset_evict(ds->ds_objset);
2738 ds->ds_objset = NULL;
2739 }
2740 /* move snap name entry */
2741 VERIFY(0 == dsl_dataset_get_snapname(ds));
2742 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2743 ds->ds_snapname, tx));
2744 VERIFY(0 == zap_add(dp->dp_meta_objset,
2745 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2746 8, 1, &ds->ds_object, tx));
2747
2748 /* change containing dsl_dir */
2749 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2750 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2751 ds->ds_phys->ds_dir_obj = dd->dd_object;
2752 ASSERT3P(ds->ds_dir, ==, odd);
2753 dsl_dir_close(ds->ds_dir, ds);
2754 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2755 NULL, ds, &ds->ds_dir));
2756
2757 /* move any clone references */
2758 if (ds->ds_phys->ds_next_clones_obj &&
2759 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2760 zap_cursor_t zc;
2761 zap_attribute_t za;
2762
2763 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2764 ds->ds_phys->ds_next_clones_obj);
2765 zap_cursor_retrieve(&zc, &za) == 0;
2766 zap_cursor_advance(&zc)) {
2767 dsl_dataset_t *cnds;
2768 uint64_t o;
2769
2770 if (za.za_first_integer == oldnext_obj) {
2771 /*
2772 * We've already moved the
2773 * origin's reference.
2774 */
2775 continue;
2776 }
2777
2778 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2779 za.za_first_integer, FTAG, &cnds));
2780 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2781
2782 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2783 odd->dd_phys->dd_clones, o, tx), ==, 0);
2784 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2785 dd->dd_phys->dd_clones, o, tx), ==, 0);
2786 dsl_dataset_rele(cnds, FTAG);
2787 }
2788 zap_cursor_fini(&zc);
2789 }
2790
2791 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2792 }
2793
2794 /*
2795 * Change space accounting.
2796 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2797 * both be valid, or both be 0 (resulting in delta == 0). This
2798 * is true for each of {clone,origin} independently.
2799 */
2800
2801 delta = pa->cloneusedsnap -
2802 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2803 ASSERT3S(delta, >=, 0);
2804 ASSERT3U(pa->used, >=, delta);
2805 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2806 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2807 pa->used - delta, pa->comp, pa->uncomp, tx);
2808
2809 delta = pa->originusedsnap -
2810 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2811 ASSERT3S(delta, <=, 0);
2812 ASSERT3U(pa->used, >=, -delta);
2813 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2814 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2815 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2816
2817 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2818
2819 /* log history record */
2820 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2821 "dataset = %llu", hds->ds_object);
2822
2823 dsl_dir_close(odd, FTAG);
2824 }
2825
2826 static char *snaplist_tag = "snaplist";
2827 /*
2828 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2829 * (exclusive) and last_obj (inclusive). The list will be in reverse
2830 * order (last_obj will be the list_head()). If first_obj == 0, do all
2831 * snapshots back to this dataset's origin.
2832 */
2833 static int
2834 snaplist_make(dsl_pool_t *dp, boolean_t own,
2835 uint64_t first_obj, uint64_t last_obj, list_t *l)
2836 {
2837 uint64_t obj = last_obj;
2838
2839 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2840
2841 list_create(l, sizeof (struct promotenode),
2842 offsetof(struct promotenode, link));
2843
2844 while (obj != first_obj) {
2845 dsl_dataset_t *ds;
2846 struct promotenode *snap;
2847 int err;
2848
2849 if (own) {
2850 err = dsl_dataset_own_obj(dp, obj,
2851 0, snaplist_tag, &ds);
2852 if (err == 0)
2853 dsl_dataset_make_exclusive(ds, snaplist_tag);
2854 } else {
2855 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2856 }
2857 if (err == ENOENT) {
2858 /* lost race with snapshot destroy */
2859 struct promotenode *last = list_tail(l);
2860 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2861 obj = last->ds->ds_phys->ds_prev_snap_obj;
2862 continue;
2863 } else if (err) {
2864 return (err);
2865 }
2866
2867 if (first_obj == 0)
2868 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2869
2870 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2871 snap->ds = ds;
2872 list_insert_tail(l, snap);
2873 obj = ds->ds_phys->ds_prev_snap_obj;
2874 }
2875
2876 return (0);
2877 }
2878
2879 static int
2880 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2881 {
2882 struct promotenode *snap;
2883
2884 *spacep = 0;
2885 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2886 uint64_t used, comp, uncomp;
2887 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2888 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2889 *spacep += used;
2890 }
2891 return (0);
2892 }
2893
2894 static void
2895 snaplist_destroy(list_t *l, boolean_t own)
2896 {
2897 struct promotenode *snap;
2898
2899 if (!l || !list_link_active(&l->list_head))
2900 return;
2901
2902 while ((snap = list_tail(l)) != NULL) {
2903 list_remove(l, snap);
2904 if (own)
2905 dsl_dataset_disown(snap->ds, snaplist_tag);
2906 else
2907 dsl_dataset_rele(snap->ds, snaplist_tag);
2908 kmem_free(snap, sizeof (struct promotenode));
2909 }
2910 list_destroy(l);
2911 }
2912
2913 /*
2914 * Promote a clone. Nomenclature note:
2915 * "clone" or "cds": the original clone which is being promoted
2916 * "origin" or "ods": the snapshot which is originally clone's origin
2917 * "origin head" or "ohds": the dataset which is the head
2918 * (filesystem/volume) for the origin
2919 * "origin origin": the origin of the origin's filesystem (typically
2920 * NULL, indicating that the clone is not a clone of a clone).
2921 */
2922 int
2923 dsl_dataset_promote(const char *name, char *conflsnap)
2924 {
2925 dsl_dataset_t *ds;
2926 dsl_dir_t *dd;
2927 dsl_pool_t *dp;
2928 dmu_object_info_t doi;
2929 struct promotearg pa;
2930 struct promotenode *snap;
2931 int err;
2932
2933 bzero(&pa, sizeof (struct promotearg));
2934 err = dsl_dataset_hold(name, FTAG, &ds);
2935 if (err)
2936 return (err);
2937 dd = ds->ds_dir;
2938 dp = dd->dd_pool;
2939
2940 err = dmu_object_info(dp->dp_meta_objset,
2941 ds->ds_phys->ds_snapnames_zapobj, &doi);
2942 if (err) {
2943 dsl_dataset_rele(ds, FTAG);
2944 return (err);
2945 }
2946
2947 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2948 dsl_dataset_rele(ds, FTAG);
2949 return (EINVAL);
2950 }
2951
2952 /*
2953 * We are going to inherit all the snapshots taken before our
2954 * origin (i.e., our new origin will be our parent's origin).
2955 * Take ownership of them so that we can rename them into our
2956 * namespace.
2957 */
2958 rw_enter(&dp->dp_config_rwlock, RW_READER);
2959
2960 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2961 &pa.shared_snaps);
2962 if (err != 0)
2963 goto out;
2964
2965 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2966 if (err != 0)
2967 goto out;
2968
2969 snap = list_head(&pa.shared_snaps);
2970 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2971 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2972 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2973 if (err != 0)
2974 goto out;
2975
2976 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
2977 err = dsl_dataset_hold_obj(dp,
2978 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2979 FTAG, &pa.origin_origin);
2980 if (err != 0)
2981 goto out;
2982 }
2983
2984 out:
2985 rw_exit(&dp->dp_config_rwlock);
2986
2987 /*
2988 * Add in 128x the snapnames zapobj size, since we will be moving
2989 * a bunch of snapnames to the promoted ds, and dirtying their
2990 * bonus buffers.
2991 */
2992 if (err == 0) {
2993 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2994 dsl_dataset_promote_sync, ds, &pa,
2995 2 + 2 * doi.doi_physical_blocks_512);
2996 if (err && pa.err_ds && conflsnap)
2997 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
2998 }
2999
3000 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3001 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3002 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3003 if (pa.origin_origin)
3004 dsl_dataset_rele(pa.origin_origin, FTAG);
3005 dsl_dataset_rele(ds, FTAG);
3006 return (err);
3007 }
3008
3009 struct cloneswaparg {
3010 dsl_dataset_t *cds; /* clone dataset */
3011 dsl_dataset_t *ohds; /* origin's head dataset */
3012 boolean_t force;
3013 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3014 };
3015
3016 /* ARGSUSED */
3017 static int
3018 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3019 {
3020 struct cloneswaparg *csa = arg1;
3021
3022 /* they should both be heads */
3023 if (dsl_dataset_is_snapshot(csa->cds) ||
3024 dsl_dataset_is_snapshot(csa->ohds))
3025 return (EINVAL);
3026
3027 /* the branch point should be just before them */
3028 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3029 return (EINVAL);
3030
3031 /* cds should be the clone (unless they are unrelated) */
3032 if (csa->cds->ds_prev != NULL &&
3033 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3034 csa->ohds->ds_object !=
3035 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3036 return (EINVAL);
3037
3038 /* the clone should be a child of the origin */
3039 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3040 return (EINVAL);
3041
3042 /* ohds shouldn't be modified unless 'force' */
3043 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3044 return (ETXTBSY);
3045
3046 /* adjust amount of any unconsumed refreservation */
3047 csa->unused_refres_delta =
3048 (int64_t)MIN(csa->ohds->ds_reserved,
3049 csa->ohds->ds_phys->ds_unique_bytes) -
3050 (int64_t)MIN(csa->ohds->ds_reserved,
3051 csa->cds->ds_phys->ds_unique_bytes);
3052
3053 if (csa->unused_refres_delta > 0 &&
3054 csa->unused_refres_delta >
3055 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3056 return (ENOSPC);
3057
3058 if (csa->ohds->ds_quota != 0 &&
3059 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3060 return (EDQUOT);
3061
3062 return (0);
3063 }
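/*
 * unused_refres_delta example (illustrative numbers): with
 * refreservation = 10G, ohds unique = 6G and cds unique = 2G, the
 * swap drops the head's unique space by 4G, so the unconsumed
 * refreservation grows by MIN(10G,6G) - MIN(10G,2G) = 4G, and that
 * 4G must be available in the parent or the check returns ENOSPC.
 */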
3064
3065 /* ARGSUSED */
3066 static void
3067 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3068 {
3069 struct cloneswaparg *csa = arg1;
3070 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3071
3072 ASSERT(csa->cds->ds_reserved == 0);
3073 ASSERT(csa->ohds->ds_quota == 0 ||
3074 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3075
3076 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3077 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3078
3079 if (csa->cds->ds_objset != NULL) {
3080 dmu_objset_evict(csa->cds->ds_objset);
3081 csa->cds->ds_objset = NULL;
3082 }
3083
3084 if (csa->ohds->ds_objset != NULL) {
3085 dmu_objset_evict(csa->ohds->ds_objset);
3086 csa->ohds->ds_objset = NULL;
3087 }
3088
3089 /*
3090 * Reset origin's unique bytes, if it exists.
3091 */
3092 if (csa->cds->ds_prev) {
3093 dsl_dataset_t *origin = csa->cds->ds_prev;
3094 uint64_t comp, uncomp;
3095
3096 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3097 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3098 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3099 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3100 }
3101
3102 /* swap blkptrs */
3103 {
3104 blkptr_t tmp;
3105 tmp = csa->ohds->ds_phys->ds_bp;
3106 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3107 csa->cds->ds_phys->ds_bp = tmp;
3108 }
3109
3110 /* set dd_*_bytes */
3111 {
3112 int64_t dused, dcomp, duncomp;
3113 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3114 uint64_t odl_used, odl_comp, odl_uncomp;
3115
3116 ASSERT3U(csa->cds->ds_dir->dd_phys->
3117 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3118
3119 dsl_deadlist_space(&csa->cds->ds_deadlist,
3120 &cdl_used, &cdl_comp, &cdl_uncomp);
3121 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3122 &odl_used, &odl_comp, &odl_uncomp);
3123
3124 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3125 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3126 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3127 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3128 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3129 cdl_uncomp -
3130 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3131
3132 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3133 dused, dcomp, duncomp, tx);
3134 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3135 -dused, -dcomp, -duncomp, tx);
3136
3137 /*
3138 * The difference in the space used by snapshots is the
3139 * difference in snapshot space due to the head's
3140 * deadlist (since that's the only thing that's
3141 * changing that affects the snapused).
3142 */
3143 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3144 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3145 &cdl_used, &cdl_comp, &cdl_uncomp);
3146 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3147 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3148 &odl_used, &odl_comp, &odl_uncomp);
3149 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3150 DD_USED_HEAD, DD_USED_SNAP, tx);
3151 }
3152
3153 /* swap ds_*_bytes */
3154 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3155 csa->cds->ds_phys->ds_used_bytes);
3156 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3157 csa->cds->ds_phys->ds_compressed_bytes);
3158 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3159 csa->cds->ds_phys->ds_uncompressed_bytes);
3160 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3161 csa->cds->ds_phys->ds_unique_bytes);
3162
3163 /* apply any parent delta for change in unconsumed refreservation */
3164 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3165 csa->unused_refres_delta, 0, 0, tx);
3166
3167 /*
3168 * Swap deadlists.
3169 */
3170 dsl_deadlist_close(&csa->cds->ds_deadlist);
3171 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3172 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3173 csa->cds->ds_phys->ds_deadlist_obj);
3174 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3175 csa->cds->ds_phys->ds_deadlist_obj);
3176 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3177 csa->ohds->ds_phys->ds_deadlist_obj);
3178
3179 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3180 }
3181
3182 /*
3183 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3184 * recv" into an existing fs to swizzle the file system to the new
3185 * version, and by "zfs rollback". Can also be used to swap two
3186 * independent head datasets if neither has any snapshots.
3187 */
3188 int
3189 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3190 boolean_t force)
3191 {
3192 struct cloneswaparg csa;
3193 int error;
3194
3195 ASSERT(clone->ds_owner);
3196 ASSERT(origin_head->ds_owner);
3197 retry:
3198 /*
3199 * Need exclusive access for the swap. If we're swapping these
3200 * datasets back after an error, we already hold the locks.
3201 */
3202 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3203 rw_enter(&clone->ds_rwlock, RW_WRITER);
3204 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3205 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3206 rw_exit(&clone->ds_rwlock);
3207 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3208 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3209 rw_exit(&origin_head->ds_rwlock);
3210 goto retry;
3211 }
3212 }
3213 csa.cds = clone;
3214 csa.ohds = origin_head;
3215 csa.force = force;
3216 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3217 dsl_dataset_clone_swap_check,
3218 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3219 return (error);
3220 }
3221
3222 /*
3223 * Given a pool name and a dataset object number in that pool,
3224 * return the name of that dataset.
3225 */
3226 int
3227 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3228 {
3229 spa_t *spa;
3230 dsl_pool_t *dp;
3231 dsl_dataset_t *ds;
3232 int error;
3233
3234 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3235 return (error);
3236 dp = spa_get_dsl(spa);
3237 rw_enter(&dp->dp_config_rwlock, RW_READER);
3238 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3239 dsl_dataset_name(ds, buf);
3240 dsl_dataset_rele(ds, FTAG);
3241 }
3242 rw_exit(&dp->dp_config_rwlock);
3243 spa_close(spa, FTAG);
3244
3245 return (error);
3246 }
3247
3248 int
3249 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3250 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3251 {
3252 int error = 0;
3253
3254 ASSERT3S(asize, >, 0);
3255
3256 /*
3257 * *ref_rsrv is the portion of asize that will come from any
3258 * unconsumed refreservation space.
3259 */
3260 *ref_rsrv = 0;
3261
3262 mutex_enter(&ds->ds_lock);
3263 /*
3264 * Make a space adjustment for reserved bytes.
3265 */
3266 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3267 ASSERT3U(*used, >=,
3268 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3269 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3270 *ref_rsrv =
3271 asize - MIN(asize, parent_delta(ds, asize + inflight));
3272 }
3273
3274 if (!check_quota || ds->ds_quota == 0) {
3275 mutex_exit(&ds->ds_lock);
3276 return (0);
3277 }
3278 /*
3279 * If they are requesting more space, and our current estimate
3280 * is over quota, they get to try again unless the actual
3281 * on-disk is over quota and there are no pending changes (which
3282 * may free up space for us).
3283 */
3284 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3285 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3286 error = ERESTART;
3287 else
3288 error = EDQUOT;
3289 }
3290 mutex_exit(&ds->ds_lock);
3291
3292 return (error);
3293 }
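/*
 * Quota logic example (illustrative numbers): refquota = 10G,
 * on-disk used = 9.5G, inflight = 1G, so the estimate 10.5G >=
 * quota. Because inflight > 0, pending frees may still make room,
 * so we return ERESTART to retry; only when used >= quota with
 * nothing in flight do we give up with EDQUOT.
 */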
3294
3295 /* ARGSUSED */
3296 static int
3297 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3298 {
3299 dsl_dataset_t *ds = arg1;
3300 dsl_prop_setarg_t *psa = arg2;
3301 int err;
3302
3303 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3304 return (ENOTSUP);
3305
3306 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3307 return (err);
3308
3309 if (psa->psa_effective_value == 0)
3310 return (0);
3311
3312 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3313 psa->psa_effective_value < ds->ds_reserved)
3314 return (ENOSPC);
3315
3316 return (0);
3317 }
3318
3319 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3320
3321 void
3322 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3323 {
3324 dsl_dataset_t *ds = arg1;
3325 dsl_prop_setarg_t *psa = arg2;
3326 uint64_t effective_value = psa->psa_effective_value;
3327
3328 dsl_prop_set_sync(ds, psa, tx);
3329 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3330
3331 if (ds->ds_quota != effective_value) {
3332 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3333 ds->ds_quota = effective_value;
3334
3335 spa_history_log_internal(LOG_DS_REFQUOTA,
3336 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3337 (longlong_t)ds->ds_quota, ds->ds_object);
3338 }
3339 }
3340
3341 int
3342 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3343 {
3344 dsl_dataset_t *ds;
3345 dsl_prop_setarg_t psa;
3346 int err;
3347
3348 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3349
3350 err = dsl_dataset_hold(dsname, FTAG, &ds);
3351 if (err)
3352 return (err);
3353
3354 /*
3355 * If someone removes a file, then tries to set the quota, we
3356 * want to make sure the file freeing takes effect.
3357 */
3358 txg_wait_open(ds->ds_dir->dd_pool, 0);
3359
3360 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3361 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3362 ds, &psa, 0);
3363
3364 dsl_dataset_rele(ds, FTAG);
3365 return (err);
3366 }
3367
3368 static int
3369 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3370 {
3371 dsl_dataset_t *ds = arg1;
3372 dsl_prop_setarg_t *psa = arg2;
3373 uint64_t effective_value;
3374 uint64_t unique;
3375 int err;
3376
3377 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3378 SPA_VERSION_REFRESERVATION)
3379 return (ENOTSUP);
3380
3381 if (dsl_dataset_is_snapshot(ds))
3382 return (EINVAL);
3383
3384 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3385 return (err);
3386
3387 effective_value = psa->psa_effective_value;
3388
3389 /*
3390 * If we are doing the preliminary check in open context, the
3391 * space estimates may be inaccurate.
3392 */
3393 if (!dmu_tx_is_syncing(tx))
3394 return (0);
3395
3396 mutex_enter(&ds->ds_lock);
3397 if (!DS_UNIQUE_IS_ACCURATE(ds))
3398 dsl_dataset_recalc_head_uniq(ds);
3399 unique = ds->ds_phys->ds_unique_bytes;
3400 mutex_exit(&ds->ds_lock);
3401
3402 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3403 uint64_t delta = MAX(unique, effective_value) -
3404 MAX(unique, ds->ds_reserved);
3405
3406 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3407 return (ENOSPC);
3408 if (ds->ds_quota > 0 &&
3409 effective_value > ds->ds_quota)
3410 return (ENOSPC);
3411 }
3412
3413 return (0);
3414 }
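/*
 * Reservation delta example (illustrative numbers): unique = 2G,
 * current refreservation = 3G, new value = 8G. The parent must
 * absorb MAX(2G,8G) - MAX(2G,3G) = 5G of additional charge, so 5G
 * must be available (and the new value may not exceed any refquota).
 */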
3415
3416 static void
3417 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3418 {
3419 dsl_dataset_t *ds = arg1;
3420 dsl_prop_setarg_t *psa = arg2;
3421 uint64_t effective_value = psa->psa_effective_value;
3422 uint64_t unique;
3423 int64_t delta;
3424
3425 dsl_prop_set_sync(ds, psa, tx);
3426 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3427
3428 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3429
3430 mutex_enter(&ds->ds_dir->dd_lock);
3431 mutex_enter(&ds->ds_lock);
3432 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3433 unique = ds->ds_phys->ds_unique_bytes;
3434 delta = MAX(0, (int64_t)(effective_value - unique)) -
3435 MAX(0, (int64_t)(ds->ds_reserved - unique));
3436 ds->ds_reserved = effective_value;
3437 mutex_exit(&ds->ds_lock);
3438
3439 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3440 mutex_exit(&ds->ds_dir->dd_lock);
3441
3442 spa_history_log_internal(LOG_DS_REFRESERV,
3443 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3444 (longlong_t)effective_value, ds->ds_object);
3445 }
3446
3447 int
3448 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3449 uint64_t reservation)
3450 {
3451 dsl_dataset_t *ds;
3452 dsl_prop_setarg_t psa;
3453 int err;
3454
3455 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3456 &reservation);
3457
3458 err = dsl_dataset_hold(dsname, FTAG, &ds);
3459 if (err)
3460 return (err);
3461
3462 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3463 dsl_dataset_set_reservation_check,
3464 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3465
3466 dsl_dataset_rele(ds, FTAG);
3467 return (err);
3468 }
3469
3470 typedef struct zfs_hold_cleanup_arg {
3471 dsl_pool_t *dp;
3472 uint64_t dsobj;
3473 char htag[MAXNAMELEN];
3474 } zfs_hold_cleanup_arg_t;
3475
3476 static void
3477 dsl_dataset_user_release_onexit(void *arg)
3478 {
3479 zfs_hold_cleanup_arg_t *ca = arg;
3480
3481 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3482 B_TRUE);
3483 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3484 }
3485
3486 void
3487 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3488 minor_t minor)
3489 {
3490 zfs_hold_cleanup_arg_t *ca;
3491
3492 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3493 ca->dp = ds->ds_dir->dd_pool;
3494 ca->dsobj = ds->ds_object;
3495 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3496 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3497 dsl_dataset_user_release_onexit, ca, NULL));
3498 }
3499
3500 /*
3501 * If you add new checks here, you may need to add
3502 * additional checks to the "temporary" case in
3503 * snapshot_check() in dmu_objset.c.
3504 */
3505 static int
3506 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3507 {
3508 dsl_dataset_t *ds = arg1;
3509 struct dsl_ds_holdarg *ha = arg2;
3510 char *htag = ha->htag;
3511 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3512 uint64_t tmp;
int error = 0;
3513
3514 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3515 return (ENOTSUP);
3516
3517 if (!dsl_dataset_is_snapshot(ds))
3518 return (EINVAL);
3519
3520 /* tags must be unique */
3521 mutex_enter(&ds->ds_lock);
3522 if (ds->ds_phys->ds_userrefs_obj) {
3523 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3524 8, 1, &tmp);
3525 if (error == 0)
3526 error = EEXIST;
3527 else if (error == ENOENT)
3528 error = 0;
3529 }
3530 mutex_exit(&ds->ds_lock);
3531
3532 if (error == 0 && ha->temphold &&
3533 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3534 error = E2BIG;
3535
3536 return (error);
3537 }
3538
3539 void
3540 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3541 {
3542 dsl_dataset_t *ds = arg1;
3543 struct dsl_ds_holdarg *ha = arg2;
3544 char *htag = ha->htag;
3545 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3546 objset_t *mos = dp->dp_meta_objset;
3547 uint64_t now = gethrestime_sec();
3548 uint64_t zapobj;
3549
3550 mutex_enter(&ds->ds_lock);
3551 if (ds->ds_phys->ds_userrefs_obj == 0) {
3552 /*
3553 * This is the first user hold for this dataset. Create
3554 * the userrefs zap object.
3555 */
3556 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3557 zapobj = ds->ds_phys->ds_userrefs_obj =
3558 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3559 } else {
3560 zapobj = ds->ds_phys->ds_userrefs_obj;
3561 }
3562 ds->ds_userrefs++;
3563 mutex_exit(&ds->ds_lock);
3564
3565 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3566
3567 if (ha->temphold) {
3568 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3569 htag, &now, tx));
3570 }
3571
3572 spa_history_log_internal(LOG_DS_USER_HOLD,
3573 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3574 (int)ha->temphold, ds->ds_object);
3575 }
3576
3577 static int
3578 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3579 {
3580 struct dsl_ds_holdarg *ha = arg;
3581 dsl_dataset_t *ds;
3582 int error;
3583 char *name;
3584
3585 /* alloc a buffer to hold dsname@snapname plus terminating NUL */
3586 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3587 error = dsl_dataset_hold(name, ha->dstg, &ds);
3588 strfree(name);
3589 if (error == 0) {
3590 ha->gotone = B_TRUE;
3591 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3592 dsl_dataset_user_hold_sync, ds, ha, 0);
3593 } else if (error == ENOENT && ha->recursive) {
3594 error = 0;
3595 } else {
3596 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3597 }
3598 return (error);
3599 }
3600
3601 int
3602 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3603 boolean_t temphold)
3604 {
3605 struct dsl_ds_holdarg *ha;
3606 int error;
3607
3608 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3609 ha->htag = htag;
3610 ha->temphold = temphold;
3611 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3612 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3613 ds, ha, 0);
3614 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3615
3616 return (error);
3617 }
3618
3619 int
3620 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3621 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3622 {
3623 struct dsl_ds_holdarg *ha;
3624 dsl_sync_task_t *dst;
3625 spa_t *spa;
3626 int error;
3627 minor_t minor = 0;
3628
3629 if (cleanup_fd != -1) {
3630 /* Currently we only support cleanup-on-exit of tempholds. */
3631 if (!temphold)
3632 return (EINVAL);
3633 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3634 if (error)
3635 return (error);
3636 }
3637
3638 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3639
3640 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3641
3642 error = spa_open(dsname, &spa, FTAG);
3643 if (error) {
3644 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3645 if (cleanup_fd != -1)
3646 zfs_onexit_fd_rele(cleanup_fd);
3647 return (error);
3648 }
3649
3650 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3651 ha->htag = htag;
3652 ha->snapname = snapname;
3653 ha->recursive = recursive;
3654 ha->temphold = temphold;
3655
3656 if (recursive) {
3657 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3658 ha, DS_FIND_CHILDREN);
3659 } else {
3660 error = dsl_dataset_user_hold_one(dsname, ha);
3661 }
3662 if (error == 0)
3663 error = dsl_sync_task_group_wait(ha->dstg);
3664
3665 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3666 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3667 dsl_dataset_t *ds = dst->dst_arg1;
3668
3669 if (dst->dst_err) {
3670 dsl_dataset_name(ds, ha->failed);
3671 *strchr(ha->failed, '@') = '\0';
3672 } else if (error == 0 && minor != 0 && temphold) {
3673 /*
3674 * If this hold is to be released upon process exit,
3675 * register that action now.
3676 */
3677 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3678 }
3679 dsl_dataset_rele(ds, ha->dstg);
3680 }
3681
3682 if (error == 0 && recursive && !ha->gotone)
3683 error = ENOENT;
3684
3685 if (error)
3686 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3687
3688 dsl_sync_task_group_destroy(ha->dstg);
3689
3690 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3691 spa_close(spa, FTAG);
3692 if (cleanup_fd != -1)
3693 zfs_onexit_fd_rele(cleanup_fd);
3694 return (error);
3695 }
3696
3697 struct dsl_ds_releasearg {
3698 dsl_dataset_t *ds;
3699 const char *htag;
3700 boolean_t own; /* do we own or just hold ds? */
3701 };
3702
3703 static int
3704 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3705 boolean_t *might_destroy)
3706 {
3707 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3708 uint64_t zapobj;
3709 uint64_t tmp;
3710 int error;
3711
3712 *might_destroy = B_FALSE;
3713
3714 mutex_enter(&ds->ds_lock);
3715 zapobj = ds->ds_phys->ds_userrefs_obj;
3716 if (zapobj == 0) {
3717 /* The tag can't possibly exist */
3718 mutex_exit(&ds->ds_lock);
3719 return (ESRCH);
3720 }
3721
3722 /* Make sure the tag exists */
3723 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3724 if (error) {
3725 mutex_exit(&ds->ds_lock);
3726 if (error == ENOENT)
3727 error = ESRCH;
3728 return (error);
3729 }
3730
3731 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3732 DS_IS_DEFER_DESTROY(ds))
3733 *might_destroy = B_TRUE;
3734
3735 mutex_exit(&ds->ds_lock);
3736 return (0);
3737 }
3738
3739 static int
3740 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3741 {
3742 struct dsl_ds_releasearg *ra = arg1;
3743 dsl_dataset_t *ds = ra->ds;
3744 boolean_t might_destroy;
3745 int error;
3746
3747 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3748 return (ENOTSUP);
3749
3750 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3751 if (error)
3752 return (error);
3753
3754 if (might_destroy) {
3755 struct dsl_ds_destroyarg dsda = {0};
3756
3757 if (dmu_tx_is_syncing(tx)) {
3758 /*
3759 * If we're not prepared to remove the snapshot,
3760 * we can't allow the release to happen right now.
3761 */
3762 if (!ra->own)
3763 return (EBUSY);
3764 }
3765 dsda.ds = ds;
3766 dsda.releasing = B_TRUE;
3767 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3768 }
3769
3770 return (0);
3771 }
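/*
 * Note: like other dsl_sync_task check functions, the check above is
 * run both as a preliminary pass in open context and again in syncing
 * context; the dmu_tx_is_syncing() test separates the two passes.
 */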
3772
3773 static void
3774 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3775 {
3776 struct dsl_ds_releasearg *ra = arg1;
3777 dsl_dataset_t *ds = ra->ds;
3778 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3779 objset_t *mos = dp->dp_meta_objset;
3780 uint64_t zapobj;
3781 uint64_t dsobj = ds->ds_object;
3782 uint64_t refs;
3783 int error;
3784
3785 mutex_enter(&ds->ds_lock);
3786 ds->ds_userrefs--;
3787 refs = ds->ds_userrefs;
3788 mutex_exit(&ds->ds_lock);
3789 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3790 VERIFY(error == 0 || error == ENOENT);
3791 zapobj = ds->ds_phys->ds_userrefs_obj;
3792 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3793 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3794 DS_IS_DEFER_DESTROY(ds)) {
3795 struct dsl_ds_destroyarg dsda = {0};
3796
3797 ASSERT(ra->own);
3798 dsda.ds = ds;
3799 dsda.releasing = B_TRUE;
3800 /* We already did the destroy_check */
3801 dsl_dataset_destroy_sync(&dsda, tag, tx);
3802 }
3803
3804 spa_history_log_internal(LOG_DS_USER_RELEASE,
3805 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3806 ra->htag, (longlong_t)refs, dsobj);
3807 }
3808
3809 static int
3810 dsl_dataset_user_release_one(const char *dsname, void *arg)
3811 {
3812 struct dsl_ds_holdarg *ha = arg;
3813 struct dsl_ds_releasearg *ra;
3814 dsl_dataset_t *ds;
3815 int error;
3816 void *dtag = ha->dstg;
3817 char *name;
3818 boolean_t own = B_FALSE;
3819 boolean_t might_destroy;
3820
3821 /* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3822 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3823 error = dsl_dataset_hold(name, dtag, &ds);
3824 strfree(name);
3825 if (error == ENOENT && ha->recursive)
3826 return (0);
3827 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3828 if (error)
3829 return (error);
3830
3831 ha->gotone = B_TRUE;
3832
3833 ASSERT(dsl_dataset_is_snapshot(ds));
3834
3835 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3836 if (error) {
3837 dsl_dataset_rele(ds, dtag);
3838 return (error);
3839 }
3840
3841 if (might_destroy) {
3842 #ifdef _KERNEL
3843 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3844 error = zfs_unmount_snap(name, NULL);
3845 strfree(name);
3846 if (error) {
3847 dsl_dataset_rele(ds, dtag);
3848 return (error);
3849 }
3850 #endif
3851 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3852 dsl_dataset_rele(ds, dtag);
3853 return (EBUSY);
3854 } else {
3855 own = B_TRUE;
3856 dsl_dataset_make_exclusive(ds, dtag);
3857 }
3858 }
3859
3860 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3861 ra->ds = ds;
3862 ra->htag = ha->htag;
3863 ra->own = own;
3864 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3865 dsl_dataset_user_release_sync, ra, dtag, 0);
3866
3867 return (0);
3868 }
3869
3870 int
3871 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3872 boolean_t recursive)
3873 {
3874 struct dsl_ds_holdarg *ha;
3875 dsl_sync_task_t *dst;
3876 spa_t *spa;
3877 int error;
3878
3879 top:
3880 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3881
3882 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3883
3884 error = spa_open(dsname, &spa, FTAG);
3885 if (error) {
3886 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3887 return (error);
3888 }
3889
3890 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3891 ha->htag = htag;
3892 ha->snapname = snapname;
3893 ha->recursive = recursive;
3894 if (recursive) {
3895 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3896 ha, DS_FIND_CHILDREN);
3897 } else {
3898 error = dsl_dataset_user_release_one(dsname, ha);
3899 }
3900 if (error == 0)
3901 error = dsl_sync_task_group_wait(ha->dstg);
3902
3903 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3904 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3905 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3906 dsl_dataset_t *ds = ra->ds;
3907
3908 if (dst->dst_err)
3909 dsl_dataset_name(ds, ha->failed);
3910
3911 if (ra->own)
3912 dsl_dataset_disown(ds, ha->dstg);
3913 else
3914 dsl_dataset_rele(ds, ha->dstg);
3915
3916 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3917 }
3918
3919 if (error == 0 && recursive && !ha->gotone)
3920 error = ENOENT;
3921
3922 if (error && error != EBUSY)
3923 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3924
3925 dsl_sync_task_group_destroy(ha->dstg);
3926 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3927 spa_close(spa, FTAG);
3928
3929 /*
3930 * We can get EBUSY if we were racing with deferred destroy and
3931 * dsl_dataset_user_release_check() hadn't done the necessary
3932 * open context setup. We can also get EBUSY if we're racing
3933 * with destroy and that thread is the ds_owner. Either way
3934 * the busy condition should be transient, and we should retry
3935 * the release operation.
3936 */
3937 if (error == EBUSY)
3938 goto top;
3939
3940 return (error);
3941 }
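/*
 * Illustrative sketch, not from the original source: the typical call,
 * e.g. on behalf of a recursive "zfs release". example_release() is a
 * made-up name; EBUSY races are retried internally (see above), so a
 * single call suffices.
 */
#if 0	/* example only */
static int
example_release(char *dsname, char *snapname, char *htag)
{
	return (dsl_dataset_user_release(dsname, snapname, htag,
	    B_TRUE /* recursive */));
}
#endif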
3942
3943 /*
3944 * Called at spa_load time (with retry == B_FALSE) to release a stale
3945 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
3946 */
3947 int
3948 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
3949 boolean_t retry)
3950 {
3951 dsl_dataset_t *ds;
3952 char *snap;
3953 char *name;
3954 int namelen;
3955 int error;
3956
3957 do {
3958 rw_enter(&dp->dp_config_rwlock, RW_READER);
3959 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3960 rw_exit(&dp->dp_config_rwlock);
3961 if (error)
3962 return (error);
3963 namelen = dsl_dataset_namelen(ds) + 1;
3964 name = kmem_alloc(namelen, KM_SLEEP);
3965 dsl_dataset_name(ds, name);
3966 dsl_dataset_rele(ds, FTAG);
3967
3968 snap = strchr(name, '@');
3969 *snap = '\0';
3970 ++snap;
3971 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
3972 kmem_free(name, namelen);
3973
3974 /*
3975 * The object can't have been destroyed because we have a hold,
3976 * but it might have been renamed, resulting in ENOENT. Retry
3977 * if we've been requested to do so.
3978 *
3979 * It would be nice if we could use the dsobj all the way
3980 * through and avoid ENOENT entirely. But we might need to
3981 * unmount the snapshot, and there's currently no way to look up
3982 * a vfsp using a ZFS object id.
3983 */
3984 } while ((error == ENOENT) && retry);
3985
3986 return (error);
3987 }
3988
3989 int
3990 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3991 {
3992 dsl_dataset_t *ds;
3993 int err;
3994
3995 err = dsl_dataset_hold(dsname, FTAG, &ds);
3996 if (err)
3997 return (err);
3998
3999 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4000 if (ds->ds_phys->ds_userrefs_obj != 0) {
4001 zap_attribute_t *za;
4002 zap_cursor_t zc;
4003
4004 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4005 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4006 ds->ds_phys->ds_userrefs_obj);
4007 zap_cursor_retrieve(&zc, za) == 0;
4008 zap_cursor_advance(&zc)) {
4009 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4010 za->za_first_integer));
4011 }
4012 zap_cursor_fini(&zc);
4013 kmem_free(za, sizeof (zap_attribute_t));
4014 }
4015 dsl_dataset_rele(ds, FTAG);
4016 return (0);
4017 }
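/*
 * Illustrative sketch, not from the original source: consuming the
 * nvlist built above. Each pair maps a hold tag to the integer
 * recorded when the hold was taken (za_first_integer above);
 * example_print_holds() is a made-up name.
 */
#if 0	/* example only */
static void
example_print_holds(const char *dsname)
{
	nvlist_t *nvl;
	nvpair_t *pair;
	uint64_t val;

	if (dsl_dataset_get_holds(dsname, &nvl) != 0)
		return;
	for (pair = nvlist_next_nvpair(nvl, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(nvl, pair)) {
		VERIFY(0 == nvpair_value_uint64(pair, &val));
		dprintf("hold %s = %llu\n", nvpair_name(pair),
		    (u_longlong_t)val);
	}
	nvlist_free(nvl);
}
#endif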
4018
4019 /*
4020 * Note, this function is used as the callback for dmu_objset_find(). We
4021 * always return 0 so that we will continue to find and process
4022 * inconsistent datasets, even if we encounter an error trying to
4023 * process one of them.
4024 */
4025 /* ARGSUSED */
4026 int
4027 dsl_destroy_inconsistent(const char *dsname, void *arg)
4028 {
4029 dsl_dataset_t *ds;
4030
4031 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4032 if (DS_IS_INCONSISTENT(ds))
4033 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4034 else
4035 dsl_dataset_disown(ds, FTAG);
4036 }
4037 return (0);
4038 }
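/*
 * Illustrative sketch, not from the original source: the wiring the
 * comment above describes, with the pool's root dataset name assumed
 * as the starting point:
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 *
 * Returning 0 unconditionally keeps the traversal going even when an
 * individual destroy fails.
 */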