/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>

static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

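/*
 * Swap two 64-bit values in place; process_old_deadlist() below uses
 * this to exchange the deadlist objects of two datasets.
 */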
#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)


/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
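/*
 * Worked example (illustrative; not from the original source): with
 * ds_reserved = 10M and ds_unique_bytes growing from 6M to 7M, both
 * old_bytes and new_bytes clamp to 10M, so the delta propagated to the
 * dsl_dir is 0 -- the refreservation already charged that space.  Once
 * unique data exceeds the reservation (say 10M -> 11M), the full 1M
 * delta is passed through.
 */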

void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used, compressed, uncompressed;
	int64_t delta;

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}

int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	int used, compressed, uncompressed;

	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}

uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	if (ds == NULL)
		return (0);
	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}

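/*
 * A block is freeable only if it was born strictly after the most
 * recent snapshot; otherwise a snapshot still references it.  Since a
 * freeable block is likely to be freed soon, prefetch its DDT entry,
 * which the free path will need if the block is deduplicated.
 */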
boolean_t
dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
    uint64_t blk_birth)
{
	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
		return (B_FALSE);

	ddt_prefetch(dsl_dataset_get_spa(ds), bp);

	return (B_TRUE);
}

/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}

static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}

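/*
 * Look up a snapshot name in ds's snapnames ZAP.  For case-insensitive
 * (DS_FLAG_CI_DATASET) datasets the lookup is normalized (MT_FIRST),
 * falling back to an exact-match lookup if normalization is not
 * supported.
 */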
static int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}

static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);
	return (err);
}

static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_type != DMU_OT_DSL_DATASET) {
		dmu_buf_rele(dbuf, tag);
		return (EINVAL);
	}

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner = NULL;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;
		list_link_init(&ds->ds_synced_link);

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		bplist_create(&ds->ds_pending_deadlist);
		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);

		if (err == 0) {
			err = dsl_dir_open_obj(dp,
			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		}
		if (err) {
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    ds->ds_phys->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0) {
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);
		}
		if (err || winner) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}

static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock.  And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the ds_lock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}

int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}

int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		*dsp = NULL;
		return (EBUSY);
	}
	return (0);
}

int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
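/*
 * Typical usage of the hold interfaces (illustrative sketch, not from
 * the original source):
 *
 *	dsl_dataset_t *ds;
 *	int err = dsl_dataset_hold("pool/fs@snap", FTAG, &ds);
 *	if (err == 0) {
 *		... read ds->ds_phys fields ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 * A hold keeps the dataset from being destroyed while it is in use;
 * every successful hold must be balanced by dsl_dataset_rele() with
 * the same tag.
 */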

int
dsl_dataset_own(const char *name, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}

void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}

static int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}

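/*
 * dsl_dataset_drop_ref() releases only the underlying dbuf hold;
 * dsl_dataset_rele() below additionally exits the ds_rwlock READER
 * hold taken by dsl_dataset_hold_ref() (except in syncing context,
 * where no READER hold is taken).
 */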
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}

void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}

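/*
 * Attempt to become the dataset's (single) owner.  Fails if the
 * dataset already has an owner, or if it is inconsistent (e.g. in the
 * middle of a receive) and the caller did not pass inconsistentok.
 */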
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = tag;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}

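/*
 * Upgrade the owner's hold to exclusive (WRITER) access, locking out
 * all readers; the destroy code paths use this before tearing a
 * dataset down.
 */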
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}

uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void *)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj,
		    FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}

uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's ZIL header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}

909
910 /*
911 * The snapshots must all be in the same pool.
912 */
913 int
914 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
915 {
916 int err;
917 dsl_sync_task_t *dst;
918 spa_t *spa;
919 nvpair_t *pair;
920 dsl_sync_task_group_t *dstg;
921
922 pair = nvlist_next_nvpair(snaps, NULL);
923 if (pair == NULL)
924 return (0);
925
926 err = spa_open(nvpair_name(pair), &spa, FTAG);
927 if (err)
928 return (err);
929 dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
930
931 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
932 pair = nvlist_next_nvpair(snaps, pair)) {
933 dsl_dataset_t *ds;
934 int err;
935
936 err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
937 if (err == 0) {
938 struct dsl_ds_destroyarg *dsda;
939
940 dsl_dataset_make_exclusive(ds, dstg);
941 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
942 KM_SLEEP);
943 dsda->ds = ds;
944 dsda->defer = defer;
945 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
946 dsl_dataset_destroy_sync, dsda, dstg, 0);
947 } else if (err == ENOENT) {
948 err = 0;
949 } else {
950 (void) strcpy(failed, nvpair_name(pair));
951 break;
952 }
953 }
954
955 if (err == 0)
956 err = dsl_sync_task_group_wait(dstg);
957
958 for (dst = list_head(&dstg->dstg_tasks); dst;
959 dst = list_next(&dstg->dstg_tasks, dst)) {
960 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
961 dsl_dataset_t *ds = dsda->ds;
962
963 /*
964 * Return the file system name that triggered the error
965 */
966 if (dst->dst_err) {
967 dsl_dataset_name(ds, failed);
968 }
969 ASSERT3P(dsda->rm_origin, ==, NULL);
970 dsl_dataset_disown(ds, dstg);
971 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
972 }
973
974 dsl_sync_task_group_destroy(dstg);
975 spa_close(spa, FTAG);
976 return (err);
977
978 }
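/*
 * Illustrative caller sketch (an assumption, not taken from this
 * file): the snaps nvlist maps full snapshot names to boolean markers,
 * e.g.
 *
 *	nvlist_t *nvl;
 *	VERIFY(0 == nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP));
 *	VERIFY(0 == nvlist_add_boolean(nvl, "pool/fs@snap1"));
 *	VERIFY(0 == nvlist_add_boolean(nvl, "pool/fs@snap2"));
 *	err = dmu_snapshots_destroy_nvl(nvl, B_FALSE, failed);
 *	nvlist_free(nvl);
 *
 * On failure, "failed" receives the name of the snapshot that
 * triggered the error.
 */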

static boolean_t
dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
{
	boolean_t might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(ds))
		might_destroy = B_TRUE;
	mutex_exit(&ds->ds_lock);

	return (might_destroy);
}

/*
 * If we're removing a clone, and these three conditions are true:
 *	1) the clone's origin has no other children
 *	2) the clone's origin has no user references
 *	3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);
#ifdef _KERNEL
		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
#endif
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}

	return (0);
}

/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t *dummy_ds;

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
	dummy_ds->ds_dir = dd;
	dummy_ds->ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out_free;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out_free;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}
	if (err != ESRCH)
		goto out_free;

	/*
	 * Only the ZIL knows how to free log blocks.
	 */
	zil_destroy(dmu_objset_zil(os), B_FALSE);

	/*
	 * Sync out all in-flight IO.
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		ASSERTV(uint64_t count);
		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out_free;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out_free;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);

out_free:
	kmem_free(dummy_ds, sizeof (dsl_dataset_t));
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
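/*
 * Illustrative call sequence (an assumption, not taken from this
 * file):
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_own("pool/fs", B_FALSE, FTAG, &ds) == 0)
 *		(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
 *
 * dsl_dataset_destroy() consumes the owner hold: ds must not be
 * dereferenced after it returns, even on error.
 */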

blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}

void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}

void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}

/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!dsl_dataset_is_snapshot(ds));

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
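/*
 * Worked example (illustrative; not from the original source): if the
 * head uses 100M (ds_used_bytes), the most recent snapshot used 80M
 * (mrs_used), and 30M of the snapshot's space has since been freed
 * from the head (dlused, tracked in the head's deadlist), then 50M of
 * the snapshot is still shared with the head, so
 * unique = 100M - (80M - 30M) = 50M.
 */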

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}

static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}

/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point.  However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}

struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}

static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}

static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int err;
	ASSERTV(uint64_t count);

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

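/*
 * For old-format deadlists: walk ds_next's deadlist, moving blocks
 * that were born before ds's previous snapshot into ds's deadlist and
 * freeing the rest, then swap the two deadlist objects so that ds_next
 * keeps the merged list and the leftover object is freed along with
 * ds.  (New-format deadlists handle this with a range merge instead.)
 */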
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}

void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	boolean_t wont_destroy;
	uint64_t obj;

	wont_destroy = (dsda->defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));

	ASSERT(ds->ds_owner || wont_destroy);
	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	if (wont_destroy) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		return;
	}

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_prop_setarg_t psa;
		uint64_t value = 0;

		dsl_prop_setarg_init_uint64(&psa, "refreservation",
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    &value);
		psa.psa_effective_value = 0;	/* predict default value */

		dsl_dataset_set_reservation_sync(ds, &psa, tx);
		ASSERT3U(ds->ds_reserved, ==, 0);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;

			/*
			 * If the clone's origin has no other clones, no
			 * user holds, and has been marked for deferred
			 * deletion, then we should have done the necessary
			 * destroy setup for it.
			 */
			if (ds_prev->ds_phys->ds_num_children == 1 &&
			    ds_prev->ds_userrefs == 0 &&
			    DS_IS_DEFER_DESTROY(ds_prev)) {
				ASSERT3P(dsda->rm_origin, !=, NULL);
			} else {
				ASSERT3P(dsda->rm_origin, ==, NULL);
			}
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	if (dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_t *ds_next;
		uint64_t old_unique;
		uint64_t used = 0, comp = 0, uncomp = 0;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = ds_next->ds_phys->ds_unique_bytes;

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

		if (ds_next->ds_deadlist.dl_oldfmt) {
			process_old_deadlist(ds, ds_prev, ds_next,
			    after_branch_point, tx);
		} else {
			/* Adjust prev's unique space. */
			if (ds_prev && !after_branch_point) {
				dsl_deadlist_space_range(&ds_next->ds_deadlist,
				    ds_prev->ds_phys->ds_prev_snap_txg,
				    ds->ds_phys->ds_prev_snap_txg,
				    &used, &comp, &uncomp);
				ds_prev->ds_phys->ds_unique_bytes += used;
			}

			/* Adjust snapused. */
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
			    &used, &comp, &uncomp);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
			    -used, -comp, -uncomp, tx);

			/* Move blocks to be freed to pool's free list. */
			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
			    tx);
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, comp, uncomp, tx);
			dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);

			/* Merge our deadlist into next's and free it. */
			dsl_deadlist_merge(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_deadlist_obj, tx);
		}
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

		/* Collapse range in clone heads */
		dsl_dataset_remove_clones_key(ds,
		    ds->ds_phys->ds_creation_txg, tx);

		if (dsl_dataset_is_snapshot(ds_next)) {
			dsl_dataset_t *ds_nextnext;
			dsl_dataset_t *hds;

			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (i.e. be on the snap after next's
			 * deadlist).
			 */
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_nextnext));
			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
			ds_next->ds_phys->ds_unique_bytes += used;
			dsl_dataset_rele(ds_nextnext, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);

			/* Collapse range in this head. */
			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
			    FTAG, &hds));
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    ds->ds_phys->ds_creation_txg, tx);
			dsl_dataset_rele(hds, FTAG);

		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir,
				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * NB: this should be very quick, because we already
		 * freed all the objects in open context.
		 */
		ka.ds = ds;
		ka.tx = tx;
		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == 0);

		if (ds->ds_prev != NULL) {
			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
				VERIFY3U(0, ==, zap_remove_int(mos,
				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
				    ds->ds_object, tx));
			}
			dsl_dataset_rele(ds->ds_prev, ds);
			ds->ds_prev = ds_prev = NULL;
		}
	}

	/*
	 * This must be done after the traverse_dataset() above, because
	 * it will re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}
1941
1942 static int
1943 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1944 {
1945 uint64_t asize;
1946
1947 if (!dmu_tx_is_syncing(tx))
1948 return (0);
1949
1950 /*
1951 * If there's an fs-only reservation, any blocks that might become
1952 * owned by the snapshot dataset must be accommodated by space
1953 * outside of the reservation.
1954 */
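/*
 * Worked example (hypothetical figures): with 5G of unique data
 * and an 8G refreservation, up to MIN(5G, 8G) = 5G of blocks may
 * pass to the snapshot, while the head must still be able to
 * fill its entire reservation; so 5G must be available outside
 * the reservation or the snapshot fails with ENOSPC.
 */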
1955 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1956 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1957 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1958 return (ENOSPC);
1959
1960 /*
1961 * Propagate any reserved space for this snapshot to other
1962 * snapshot checks in this sync group.
1963 */
1964 if (asize > 0)
1965 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1966
1967 return (0);
1968 }
1969
1970 int
1971 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1972 {
1973 dsl_dataset_t *ds = arg1;
1974 const char *snapname = arg2;
1975 int err;
1976 uint64_t value;
1977
1978 /*
1979 * We don't allow multiple snapshots of the same txg. If there
1980 * is already one, try again.
1981 */
1982 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1983 return (EAGAIN);
1984
1985 /*
1986 * Check for a conflicting snapshot name.
1987 */
1988 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1989 if (err == 0)
1990 return (EEXIST);
1991 if (err != ENOENT)
1992 return (err);
1993
1994 /*
1995 * Check that the full snapshot name is not too long. It consists of
1996 * the dataset name's length + 1 for the @-sign + the snapshot name's length.
1997 */
1998 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1999 return (ENAMETOOLONG);
2000
2001 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2002 if (err)
2003 return (err);
2004
2005 ds->ds_trysnap_txg = tx->tx_txg;
2006 return (0);
2007 }
2008
2009 void
2010 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2011 {
2012 dsl_dataset_t *ds = arg1;
2013 const char *snapname = arg2;
2014 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2015 dmu_buf_t *dbuf;
2016 dsl_dataset_phys_t *dsphys;
2017 uint64_t dsobj, crtxg;
2018 objset_t *mos = dp->dp_meta_objset;
2019 int err;
2020
2021 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2022
2023 /*
2024 * The origin's ds_creation_txg has to be < TXG_INITIAL
2025 */
2026 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2027 crtxg = 1;
2028 else
2029 crtxg = tx->tx_txg;
2030
2031 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2032 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2033 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2034 dmu_buf_will_dirty(dbuf, tx);
2035 dsphys = dbuf->db_data;
2036 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2037 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2038 dsphys->ds_fsid_guid = unique_create();
2039 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2040 sizeof (dsphys->ds_guid));
2041 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2042 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2043 dsphys->ds_next_snap_obj = ds->ds_object;
2044 dsphys->ds_num_children = 1;
2045 dsphys->ds_creation_time = gethrestime_sec();
2046 dsphys->ds_creation_txg = crtxg;
2047 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2048 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2049 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2050 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2051 dsphys->ds_flags = ds->ds_phys->ds_flags;
2052 dsphys->ds_bp = ds->ds_phys->ds_bp;
2053 dmu_buf_rele(dbuf, FTAG);
2054
2055 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2056 if (ds->ds_prev) {
2057 uint64_t next_clones_obj =
2058 ds->ds_prev->ds_phys->ds_next_clones_obj;
2059 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2060 ds->ds_object ||
2061 ds->ds_prev->ds_phys->ds_num_children > 1);
2062 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2063 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2064 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2065 ds->ds_prev->ds_phys->ds_creation_txg);
2066 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2067 } else if (next_clones_obj != 0) {
2068 remove_from_next_clones(ds->ds_prev,
2069 dsphys->ds_next_snap_obj, tx);
2070 VERIFY3U(0, ==, zap_add_int(mos,
2071 next_clones_obj, dsobj, tx));
2072 }
2073 }
2074
2075 /*
2076 * If we have a reference-reservation on this dataset, we will
2077 * need to increase the amount of refreservation being charged
2078 * since our unique space is going to zero.
2079 */
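/*
 * Illustration (hypothetical figures): with 10G reserved and 4G
 * unique, 6G of the reservation was charged to the parent; after
 * the snapshot our unique space drops to 0, so the charge grows
 * by delta = MIN(4G, 10G) = 4G, up to the full 10G.
 */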
2080 if (ds->ds_reserved) {
2081 int64_t delta;
2082 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2083 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2084 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2085 delta, 0, 0, tx);
2086 }
2087
2088 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2089 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2090 ds->ds_dir->dd_myname, snapname, dsobj,
2091 ds->ds_phys->ds_prev_snap_txg);
2092 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2093 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2094 dsl_deadlist_close(&ds->ds_deadlist);
2095 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2096 dsl_deadlist_add_key(&ds->ds_deadlist,
2097 ds->ds_phys->ds_prev_snap_txg, tx);
2098
2099 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2100 ds->ds_phys->ds_prev_snap_obj = dsobj;
2101 ds->ds_phys->ds_prev_snap_txg = crtxg;
2102 ds->ds_phys->ds_unique_bytes = 0;
2103 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2104 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2105
2106 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2107 snapname, 8, 1, &dsobj, tx);
2108 ASSERT(err == 0);
2109
2110 if (ds->ds_prev)
2111 dsl_dataset_drop_ref(ds->ds_prev, ds);
2112 VERIFY(0 == dsl_dataset_get_ref(dp,
2113 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2114
2115 dsl_scan_ds_snapshotted(ds, tx);
2116
2117 dsl_dir_snap_cmtime_update(ds->ds_dir);
2118
2119 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2120 "dataset = %llu", dsobj);
2121 }
2122
2123 void
2124 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2125 {
2126 ASSERT(dmu_tx_is_syncing(tx));
2127 ASSERT(ds->ds_objset != NULL);
2128 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2129
2130 /*
2131 * in case we had to change ds_fsid_guid when we opened it,
2132 * sync it out now.
2133 */
2134 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2135 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2136
2137 dsl_dir_dirty(ds->ds_dir, tx);
2138 dmu_objset_sync(ds->ds_objset, zio, tx);
2139 }
2140
2141 static void
2142 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2143 {
2144 uint64_t count = 0;
2145 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2146 zap_cursor_t zc;
2147 zap_attribute_t za;
2148 nvlist_t *propval;
2149 nvlist_t *val;
2150
2151 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2152 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2153 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2154
2155 /*
2156 * There may be missing entries in ds_next_clones_obj
2157 * due to a bug in a previous version of the code.
2158 * Only trust it if it has the right number of entries.
2159 */
2160 if (ds->ds_phys->ds_next_clones_obj != 0) {
2161 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2162 &count));
2163 }
2164 if (count != ds->ds_phys->ds_num_children - 1) {
2165 goto fail;
2166 }
2167 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2168 zap_cursor_retrieve(&zc, &za) == 0;
2169 zap_cursor_advance(&zc)) {
2170 dsl_dataset_t *clone;
2171 char buf[ZFS_MAXNAMELEN];
2172 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2173 za.za_first_integer, FTAG, &clone) != 0) {
2174 goto fail;
2175 }
2176 dsl_dir_name(clone->ds_dir, buf);
2177 VERIFY(nvlist_add_boolean(val, buf) == 0);
2178 dsl_dataset_rele(clone, FTAG);
2179 }
2180 zap_cursor_fini(&zc);
2181 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2182 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2183 propval) == 0);
2184 fail:
2185 nvlist_free(val);
2186 nvlist_free(propval);
2187 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2188 }
2189
2190 void
2191 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2192 {
2193 uint64_t refd, avail, uobjs, aobjs, ratio;
2194
2195 dsl_dir_stats(ds->ds_dir, nv);
2196
2197 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2198 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2199 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2200
2201 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2202 ds->ds_phys->ds_creation_time);
2203 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2204 ds->ds_phys->ds_creation_txg);
2205 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2206 ds->ds_quota);
2207 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2208 ds->ds_reserved);
2209 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2210 ds->ds_phys->ds_guid);
2211 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2212 ds->ds_phys->ds_unique_bytes);
2213 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2214 ds->ds_object);
2215 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2216 ds->ds_userrefs);
2217 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2218 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2219
2220 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2221 uint64_t written, comp, uncomp;
2222 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2223 dsl_dataset_t *prev;
2224 int err;
2225
2226 rw_enter(&dp->dp_config_rwlock, RW_READER);
2227 err = dsl_dataset_hold_obj(dp,
2228 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2229 rw_exit(&dp->dp_config_rwlock);
2230 if (err == 0) {
2231 err = dsl_dataset_space_written(prev, ds, &written,
2232 &comp, &uncomp);
2233 dsl_dataset_rele(prev, FTAG);
2234 if (err == 0) {
2235 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2236 written);
2237 }
2238 }
2239 }
2240
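/*
 * Example (hypothetical figures): 250G of uncompressed referenced
 * data stored in 100G on disk gives ratio = 250G * 100 / 100G =
 * 250, which the zfs(8) utility displays as a 2.50x refratio.
 */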
2241 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2242 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2243 ds->ds_phys->ds_compressed_bytes);
2244 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2245
2246 if (ds->ds_phys->ds_next_snap_obj) {
2247 /*
2248 * This is a snapshot; override the dd's space used with
2249 * our unique space and compression ratio.
2250 */
2251 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2252 ds->ds_phys->ds_unique_bytes);
2253 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2254
2255 get_clones_stat(ds, nv);
2256 }
2257 }
2258
2259 void
2260 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2261 {
2262 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2263 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2264 stat->dds_guid = ds->ds_phys->ds_guid;
2265 if (ds->ds_phys->ds_next_snap_obj) {
2266 stat->dds_is_snapshot = B_TRUE;
2267 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2268 } else {
2269 stat->dds_is_snapshot = B_FALSE;
2270 stat->dds_num_clones = 0;
2271 }
2272
2273 /* clone origin is really a dsl_dir thing... */
2274 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2275 if (dsl_dir_is_clone(ds->ds_dir)) {
2276 dsl_dataset_t *ods;
2277
2278 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2279 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2280 dsl_dataset_name(ods, stat->dds_origin);
2281 dsl_dataset_drop_ref(ods, FTAG);
2282 } else {
2283 stat->dds_origin[0] = '\0';
2284 }
2285 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2286 }
2287
2288 uint64_t
2289 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2290 {
2291 return (ds->ds_fsid_guid);
2292 }
2293
2294 void
2295 dsl_dataset_space(dsl_dataset_t *ds,
2296 uint64_t *refdbytesp, uint64_t *availbytesp,
2297 uint64_t *usedobjsp, uint64_t *availobjsp)
2298 {
2299 *refdbytesp = ds->ds_phys->ds_used_bytes;
2300 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2301 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2302 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2303 if (ds->ds_quota != 0) {
2304 /*
2305 * Adjust available bytes according to refquota
2306 */
2307 if (*refdbytesp < ds->ds_quota)
2308 *availbytesp = MIN(*availbytesp,
2309 ds->ds_quota - *refdbytesp);
2310 else
2311 *availbytesp = 0;
2312 }
2313 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2314 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2315 }
2316
2317 boolean_t
2318 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2319 {
2320 ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
2321
2322 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2323 dsl_pool_sync_context(dp));
2324 if (ds->ds_prev == NULL)
2325 return (B_FALSE);
2326 if (ds->ds_phys->ds_bp.blk_birth >
2327 ds->ds_prev->ds_phys->ds_creation_txg) {
2328 objset_t *os, *os_prev;
2329 /*
2330 * It may be that only the ZIL differs, because it was
2331 * reset in the head. Don't count that as being
2332 * modified.
2333 */
2334 if (dmu_objset_from_ds(ds, &os) != 0)
2335 return (B_TRUE);
2336 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2337 return (B_TRUE);
2338 return (bcmp(&os->os_phys->os_meta_dnode,
2339 &os_prev->os_phys->os_meta_dnode,
2340 sizeof (os->os_phys->os_meta_dnode)) != 0);
2341 }
2342 return (B_FALSE);
2343 }
2344
2345 /* ARGSUSED */
2346 static int
2347 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2348 {
2349 dsl_dataset_t *ds = arg1;
2350 char *newsnapname = arg2;
2351 dsl_dir_t *dd = ds->ds_dir;
2352 dsl_dataset_t *hds;
2353 uint64_t val;
2354 int err;
2355
2356 err = dsl_dataset_hold_obj(dd->dd_pool,
2357 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2358 if (err)
2359 return (err);
2360
2361 /* new name better not be in use */
2362 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2363 dsl_dataset_rele(hds, FTAG);
2364
2365 if (err == 0)
2366 err = EEXIST;
2367 else if (err == ENOENT)
2368 err = 0;
2369
2370 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2371 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2372 err = ENAMETOOLONG;
2373
2374 return (err);
2375 }
2376
2377 static void
2378 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2379 {
2380 dsl_dataset_t *ds = arg1;
2381 const char *newsnapname = arg2;
2382 dsl_dir_t *dd = ds->ds_dir;
2383 objset_t *mos = dd->dd_pool->dp_meta_objset;
2384 dsl_dataset_t *hds;
2385 int err;
2386
2387 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2388
2389 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2390 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2391
2392 VERIFY(0 == dsl_dataset_get_snapname(ds));
2393 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2394 ASSERT3U(err, ==, 0);
2395 mutex_enter(&ds->ds_lock);
2396 (void) strcpy(ds->ds_snapname, newsnapname);
2397 mutex_exit(&ds->ds_lock);
2398 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2399 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2400 ASSERT3U(err, ==, 0);
2401
2402 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2403 "dataset = %llu", ds->ds_object);
2404 dsl_dataset_rele(hds, FTAG);
2405 }
2406
2407 struct renamesnaparg {
2408 dsl_sync_task_group_t *dstg;
2409 char failed[MAXPATHLEN];
2410 char *oldsnap;
2411 char *newsnap;
2412 };
2413
2414 static int
2415 dsl_snapshot_rename_one(const char *name, void *arg)
2416 {
2417 struct renamesnaparg *ra = arg;
2418 dsl_dataset_t *ds = NULL;
2419 char *snapname;
2420 int err;
2421
2422 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2423 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2424
2425 /*
2426 * For recursive snapshot renames the parent won't be changing
2427 * so we just pass name for both the to/from argument.
2428 */
2429 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2430 if (err != 0) {
2431 strfree(snapname);
2432 return (err == ENOENT ? 0 : err);
2433 }
2434
2435 #ifdef _KERNEL
2436 /*
2437 * Each snapshot being renamed must first be unmounted.
2438 */
2439 (void) zfs_unmount_snap(snapname, NULL);
2440 #endif
2441 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2442 strfree(snapname);
2443 if (err != 0)
2444 return (err == ENOENT ? 0 : err);
2445
2446 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2447 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2448
2449 return (0);
2450 }
2451
2452 static int
2453 dsl_recursive_rename(char *oldname, const char *newname)
2454 {
2455 int err;
2456 struct renamesnaparg *ra;
2457 dsl_sync_task_t *dst;
2458 spa_t *spa;
2459 char *cp, *fsname = spa_strdup(oldname);
2460 int len = strlen(oldname) + 1;
2461
2462 /* truncate the snapshot name to get the fsname */
2463 cp = strchr(fsname, '@');
2464 *cp = '\0';
2465
2466 err = spa_open(fsname, &spa, FTAG);
2467 if (err) {
2468 kmem_free(fsname, len);
2469 return (err);
2470 }
2471 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2472 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2473
2474 ra->oldsnap = strchr(oldname, '@') + 1;
2475 ra->newsnap = strchr(newname, '@') + 1;
2476 *ra->failed = '\0';
2477
2478 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2479 DS_FIND_CHILDREN);
2480 kmem_free(fsname, len);
2481
2482 if (err == 0) {
2483 err = dsl_sync_task_group_wait(ra->dstg);
2484 }
2485
2486 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2487 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2488 dsl_dataset_t *ds = dst->dst_arg1;
2489 if (dst->dst_err) {
2490 dsl_dir_name(ds->ds_dir, ra->failed);
2491 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2492 (void) strlcat(ra->failed, ra->newsnap,
2493 sizeof (ra->failed));
2494 }
2495 dsl_dataset_rele(ds, ra->dstg);
2496 }
2497
2498 if (err)
2499 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2500
2501 dsl_sync_task_group_destroy(ra->dstg);
2502 kmem_free(ra, sizeof (struct renamesnaparg));
2503 spa_close(spa, FTAG);
2504 return (err);
2505 }
2506
2507 static int
2508 dsl_valid_rename(const char *oldname, void *arg)
2509 {
2510 int delta = *(int *)arg;
2511
2512 if (strlen(oldname) + delta >= MAXNAMELEN)
2513 return (ENAMETOOLONG);
2514
2515 return (0);
2516 }
2517
2518 #pragma weak dmu_objset_rename = dsl_dataset_rename
2519 int
2520 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2521 {
2522 dsl_dir_t *dd;
2523 dsl_dataset_t *ds;
2524 const char *tail;
2525 int err;
2526
2527 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2528 if (err)
2529 return (err);
2530
2531 if (tail == NULL) {
2532 int delta = strlen(newname) - strlen(oldname);
2533
2534 /* if we're growing, validate child name lengths */
2535 if (delta > 0)
2536 err = dmu_objset_find(oldname, dsl_valid_rename,
2537 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2538
2539 if (err == 0)
2540 err = dsl_dir_rename(dd, newname);
2541 dsl_dir_close(dd, FTAG);
2542 return (err);
2543 }
2544
2545 if (tail[0] != '@') {
2546 /* the name ended in a nonexistent component */
2547 dsl_dir_close(dd, FTAG);
2548 return (ENOENT);
2549 }
2550
2551 dsl_dir_close(dd, FTAG);
2552
2553 /* the new name must be a snapshot in the same filesystem */
2554 tail = strchr(newname, '@');
2555 if (tail == NULL)
2556 return (EINVAL);
2557 tail++;
2558 if (strncmp(oldname, newname, tail - newname) != 0)
2559 return (EXDEV);
2560
2561 if (recursive) {
2562 err = dsl_recursive_rename(oldname, newname);
2563 } else {
2564 err = dsl_dataset_hold(oldname, FTAG, &ds);
2565 if (err)
2566 return (err);
2567
2568 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2569 dsl_dataset_snapshot_rename_check,
2570 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2571
2572 dsl_dataset_rele(ds, FTAG);
2573 }
2574
2575 return (err);
2576 }
2577
2578 struct promotenode {
2579 list_node_t link;
2580 dsl_dataset_t *ds;
2581 };
2582
2583 struct promotearg {
2584 list_t shared_snaps, origin_snaps, clone_snaps;
2585 dsl_dataset_t *origin_origin;
2586 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2587 char *err_ds;
2588 };
2589
2590 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2591
2592 static int
2593 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2594 {
2595 dsl_dataset_t *hds = arg1;
2596 struct promotearg *pa = arg2;
2597 struct promotenode *snap = list_head(&pa->shared_snaps);
2598 dsl_dataset_t *origin_ds = snap->ds;
2599 int err;
2600 uint64_t unused;
2601
2602 /* Check that it is a real clone */
2603 if (!dsl_dir_is_clone(hds->ds_dir))
2604 return (EINVAL);
2605
2606 /* Since this is so expensive, don't do the preliminary check */
2607 if (!dmu_tx_is_syncing(tx))
2608 return (0);
2609
2610 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2611 return (EXDEV);
2612
2613 /* compute origin's new unique space */
2614 snap = list_tail(&pa->clone_snaps);
2615 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2616 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2617 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2618 &pa->unique, &unused, &unused);
2619
2620 /*
2621 * Walk the snapshots that we are moving
2622 *
2623 * Compute space to transfer. Consider the incremental changes
2624 * to used for each snapshot:
2625 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2626 * So each snapshot gave birth to:
2627 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2628 * So a sequence would look like:
2629 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2630 * Which simplifies to:
2631 * uN + kN + k(N-1) + ... + k1 + k0
2632 * Note however, if we stop before we reach the ORIGIN we get:
2633 * uN + kN + k(N-1) + ... + kM - u(M-1)
2634 */
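/*
 * Numerical sketch (hypothetical figures): moving three snapshots
 * all the way back to the ORIGIN, with u0=1G, u1=2G, u2=3G and
 * k0=0, k1=1G, k2=2G, transfers
 * u2 + k2 + k1 + k0 = 3G + 2G + 1G + 0 = 6G.
 */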
2635 pa->used = origin_ds->ds_phys->ds_used_bytes;
2636 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2637 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2638 for (snap = list_head(&pa->shared_snaps); snap;
2639 snap = list_next(&pa->shared_snaps, snap)) {
2640 uint64_t val, dlused, dlcomp, dluncomp;
2641 dsl_dataset_t *ds = snap->ds;
2642
2643 /* Check that the snapshot name does not conflict */
2644 VERIFY(0 == dsl_dataset_get_snapname(ds));
2645 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2646 if (err == 0) {
2647 err = EEXIST;
2648 goto out;
2649 }
2650 if (err != ENOENT)
2651 goto out;
2652
2653 /* The very first snapshot does not have a deadlist */
2654 if (ds->ds_phys->ds_prev_snap_obj == 0)
2655 continue;
2656
2657 dsl_deadlist_space(&ds->ds_deadlist,
2658 &dlused, &dlcomp, &dluncomp);
2659 pa->used += dlused;
2660 pa->comp += dlcomp;
2661 pa->uncomp += dluncomp;
2662 }
2663
2664 /*
2665 * If we are a clone of a clone then we never reached ORIGIN,
2666 * so we need to subtract out the clone origin's used space.
2667 */
2668 if (pa->origin_origin) {
2669 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2670 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2671 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2672 }
2673
2674 /* Check that there is enough space here */
2675 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2676 pa->used);
2677 if (err)
2678 return (err);
2679
2680 /*
2681 * Compute the amounts of space that will be used by snapshots
2682 * after the promotion (for both origin and clone). For each,
2683 * it is the amount of space that will be on all of their
2684 * deadlists (that was not born before their new origin).
2685 */
2686 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2687 uint64_t space;
2688
2689 /*
2690 * Note, typically this will not be a clone of a clone,
2691 * so dd_origin_txg will be < TXG_INITIAL, so
2692 * these snaplist_space() -> dsl_deadlist_space_range()
2693 * calls will be fast because they do not have to
2694 * iterate over all bps.
2695 */
2696 snap = list_head(&pa->origin_snaps);
2697 err = snaplist_space(&pa->shared_snaps,
2698 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2699 if (err)
2700 return (err);
2701
2702 err = snaplist_space(&pa->clone_snaps,
2703 snap->ds->ds_dir->dd_origin_txg, &space);
2704 if (err)
2705 return (err);
2706 pa->cloneusedsnap += space;
2707 }
2708 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2709 err = snaplist_space(&pa->origin_snaps,
2710 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2711 if (err)
2712 return (err);
2713 }
2714
2715 return (0);
2716 out:
2717 pa->err_ds = snap->ds->ds_snapname;
2718 return (err);
2719 }
2720
2721 static void
2722 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2723 {
2724 dsl_dataset_t *hds = arg1;
2725 struct promotearg *pa = arg2;
2726 struct promotenode *snap = list_head(&pa->shared_snaps);
2727 dsl_dataset_t *origin_ds = snap->ds;
2728 dsl_dataset_t *origin_head;
2729 dsl_dir_t *dd = hds->ds_dir;
2730 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2731 dsl_dir_t *odd = NULL;
2732 uint64_t oldnext_obj;
2733 int64_t delta;
2734
2735 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2736
2737 snap = list_head(&pa->origin_snaps);
2738 origin_head = snap->ds;
2739
2740 /*
2741 * We need to explicitly open odd, since origin_ds's dd will be
2742 * changing.
2743 */
2744 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2745 NULL, FTAG, &odd));
2746
2747 /* change origin's next snap */
2748 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2749 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2750 snap = list_tail(&pa->clone_snaps);
2751 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2752 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2753
2754 /* change the origin's next clone */
2755 if (origin_ds->ds_phys->ds_next_clones_obj) {
2756 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2757 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2758 origin_ds->ds_phys->ds_next_clones_obj,
2759 oldnext_obj, tx));
2760 }
2761
2762 /* change origin */
2763 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2764 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2765 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2766 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2767 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2768 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2769 origin_head->ds_dir->dd_origin_txg =
2770 origin_ds->ds_phys->ds_creation_txg;
2771
2772 /* change dd_clone entries */
2773 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2774 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2775 odd->dd_phys->dd_clones, hds->ds_object, tx));
2776 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2777 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2778 hds->ds_object, tx));
2779
2780 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2781 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2782 origin_head->ds_object, tx));
2783 if (dd->dd_phys->dd_clones == 0) {
2784 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2785 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2786 }
2787 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2788 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2789
2790 }
2791
2792 /* move snapshots to this dir */
2793 for (snap = list_head(&pa->shared_snaps); snap;
2794 snap = list_next(&pa->shared_snaps, snap)) {
2795 dsl_dataset_t *ds = snap->ds;
2796
2797 /* unregister props as dsl_dir is changing */
2798 if (ds->ds_objset) {
2799 dmu_objset_evict(ds->ds_objset);
2800 ds->ds_objset = NULL;
2801 }
2802 /* move snap name entry */
2803 VERIFY(0 == dsl_dataset_get_snapname(ds));
2804 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2805 ds->ds_snapname, tx));
2806 VERIFY(0 == zap_add(dp->dp_meta_objset,
2807 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2808 8, 1, &ds->ds_object, tx));
2809
2810 /* change containing dsl_dir */
2811 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2812 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2813 ds->ds_phys->ds_dir_obj = dd->dd_object;
2814 ASSERT3P(ds->ds_dir, ==, odd);
2815 dsl_dir_close(ds->ds_dir, ds);
2816 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2817 NULL, ds, &ds->ds_dir));
2818
2819 /* move any clone references */
2820 if (ds->ds_phys->ds_next_clones_obj &&
2821 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2822 zap_cursor_t zc;
2823 zap_attribute_t za;
2824
2825 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2826 ds->ds_phys->ds_next_clones_obj);
2827 zap_cursor_retrieve(&zc, &za) == 0;
2828 zap_cursor_advance(&zc)) {
2829 dsl_dataset_t *cnds;
2830 uint64_t o;
2831
2832 if (za.za_first_integer == oldnext_obj) {
2833 /*
2834 * We've already moved the
2835 * origin's reference.
2836 */
2837 continue;
2838 }
2839
2840 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2841 za.za_first_integer, FTAG, &cnds));
2842 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2843
2844 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2845 odd->dd_phys->dd_clones, o, tx), ==, 0);
2846 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2847 dd->dd_phys->dd_clones, o, tx), ==, 0);
2848 dsl_dataset_rele(cnds, FTAG);
2849 }
2850 zap_cursor_fini(&zc);
2851 }
2852
2853 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2854 }
2855
2856 /*
2857 * Change space accounting.
2858 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2859 * both be valid, or both be 0 (resulting in delta == 0). This
2860 * is true for each of {clone,origin} independently.
2861 */
2862
2863 delta = pa->cloneusedsnap -
2864 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2865 ASSERT3S(delta, >=, 0);
2866 ASSERT3U(pa->used, >=, delta);
2867 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2868 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2869 pa->used - delta, pa->comp, pa->uncomp, tx);
2870
2871 delta = pa->originusedsnap -
2872 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2873 ASSERT3S(delta, <=, 0);
2874 ASSERT3U(pa->used, >=, -delta);
2875 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2876 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2877 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2878
2879 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2880
2881 /* log history record */
2882 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2883 "dataset = %llu", hds->ds_object);
2884
2885 dsl_dir_close(odd, FTAG);
2886 }
2887
2888 static char *snaplist_tag = "snaplist";
2889 /*
2890 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2891 * (exclusive) and last_obj (inclusive). The list will be in reverse
2892 * order (last_obj will be the list_head()). If first_obj == 0, do all
2893 * snapshots back to this dataset's origin.
2894 */
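/*
 * Example (hypothetical chain): for snapshots A -> B -> C -> D,
 * snaplist_make(dp, own, A, D, l) produces the list D, C, B from
 * list_head() to list_tail(); A itself is excluded.
 */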
2895 static int
2896 snaplist_make(dsl_pool_t *dp, boolean_t own,
2897 uint64_t first_obj, uint64_t last_obj, list_t *l)
2898 {
2899 uint64_t obj = last_obj;
2900
2901 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2902
2903 list_create(l, sizeof (struct promotenode),
2904 offsetof(struct promotenode, link));
2905
2906 while (obj != first_obj) {
2907 dsl_dataset_t *ds;
2908 struct promotenode *snap;
2909 int err;
2910
2911 if (own) {
2912 err = dsl_dataset_own_obj(dp, obj,
2913 0, snaplist_tag, &ds);
2914 if (err == 0)
2915 dsl_dataset_make_exclusive(ds, snaplist_tag);
2916 } else {
2917 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2918 }
2919 if (err == ENOENT) {
2920 /* lost race with snapshot destroy */
2921 struct promotenode *last = list_tail(l);
2922 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2923 obj = last->ds->ds_phys->ds_prev_snap_obj;
2924 continue;
2925 } else if (err) {
2926 return (err);
2927 }
2928
2929 if (first_obj == 0)
2930 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2931
2932 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2933 snap->ds = ds;
2934 list_insert_tail(l, snap);
2935 obj = ds->ds_phys->ds_prev_snap_obj;
2936 }
2937
2938 return (0);
2939 }
2940
2941 static int
2942 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2943 {
2944 struct promotenode *snap;
2945
2946 *spacep = 0;
2947 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2948 uint64_t used, comp, uncomp;
2949 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2950 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2951 *spacep += used;
2952 }
2953 return (0);
2954 }
2955
2956 static void
2957 snaplist_destroy(list_t *l, boolean_t own)
2958 {
2959 struct promotenode *snap;
2960
2961 if (!l || !list_link_active(&l->list_head))
2962 return;
2963
2964 while ((snap = list_tail(l)) != NULL) {
2965 list_remove(l, snap);
2966 if (own)
2967 dsl_dataset_disown(snap->ds, snaplist_tag);
2968 else
2969 dsl_dataset_rele(snap->ds, snaplist_tag);
2970 kmem_free(snap, sizeof (struct promotenode));
2971 }
2972 list_destroy(l);
2973 }
2974
2975 /*
2976 * Promote a clone. Nomenclature note:
2977 * "clone" or "cds": the original clone which is being promoted
2978 * "origin" or "ods": the snapshot which is originally clone's origin
2979 * "origin head" or "ohds": the dataset which is the head
2980 * (filesystem/volume) for the origin
2981 * "origin origin": the origin of the origin's filesystem (typically
2982 * NULL, indicating that the clone is not a clone of a clone).
2983 */
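/*
 * Concrete example (hypothetical names): when promoting "pool/clone",
 * which was cloned from "pool/fs@snap": cds = pool/clone,
 * ods = pool/fs@snap, ohds = pool/fs, and the origin origin is
 * pool/fs's own origin, if pool/fs is itself a clone.
 */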
2984 int
2985 dsl_dataset_promote(const char *name, char *conflsnap)
2986 {
2987 dsl_dataset_t *ds;
2988 dsl_dir_t *dd;
2989 dsl_pool_t *dp;
2990 dmu_object_info_t doi;
2991 struct promotearg pa;
2992 struct promotenode *snap;
2993 int err;
2994
2995 bzero(&pa, sizeof (struct promotearg));
2996 err = dsl_dataset_hold(name, FTAG, &ds);
2997 if (err)
2998 return (err);
2999 dd = ds->ds_dir;
3000 dp = dd->dd_pool;
3001
3002 err = dmu_object_info(dp->dp_meta_objset,
3003 ds->ds_phys->ds_snapnames_zapobj, &doi);
3004 if (err) {
3005 dsl_dataset_rele(ds, FTAG);
3006 return (err);
3007 }
3008
3009 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3010 dsl_dataset_rele(ds, FTAG);
3011 return (EINVAL);
3012 }
3013
3014 /*
3015 * We are going to inherit all the snapshots taken before our
3016 * origin (i.e., our new origin will be our parent's origin).
3017 * Take ownership of them so that we can rename them into our
3018 * namespace.
3019 */
3020 rw_enter(&dp->dp_config_rwlock, RW_READER);
3021
3022 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3023 &pa.shared_snaps);
3024 if (err != 0)
3025 goto out;
3026
3027 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3028 if (err != 0)
3029 goto out;
3030
3031 snap = list_head(&pa.shared_snaps);
3032 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3033 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3034 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3035 if (err != 0)
3036 goto out;
3037
3038 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3039 err = dsl_dataset_hold_obj(dp,
3040 snap->ds->ds_dir->dd_phys->dd_origin_obj,
3041 FTAG, &pa.origin_origin);
3042 if (err != 0)
3043 goto out;
3044 }
3045
3046 out:
3047 rw_exit(&dp->dp_config_rwlock);
3048
3049 /*
3050 * Add in 128x the snapnames zapobj size, since we will be moving
3051 * a bunch of snapnames to the promoted ds, and dirtying their
3052 * bonus buffers.
3053 */
3054 if (err == 0) {
3055 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3056 dsl_dataset_promote_sync, ds, &pa,
3057 2 + 2 * doi.doi_physical_blocks_512);
3058 if (err && pa.err_ds && conflsnap)
3059 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3060 }
3061
3062 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3063 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3064 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3065 if (pa.origin_origin)
3066 dsl_dataset_rele(pa.origin_origin, FTAG);
3067 dsl_dataset_rele(ds, FTAG);
3068 return (err);
3069 }
3070
3071 struct cloneswaparg {
3072 dsl_dataset_t *cds; /* clone dataset */
3073 dsl_dataset_t *ohds; /* origin's head dataset */
3074 boolean_t force;
3075 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3076 };
3077
3078 /* ARGSUSED */
3079 static int
3080 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3081 {
3082 struct cloneswaparg *csa = arg1;
3083
3084 /* they should both be heads */
3085 if (dsl_dataset_is_snapshot(csa->cds) ||
3086 dsl_dataset_is_snapshot(csa->ohds))
3087 return (EINVAL);
3088
3089 /* the branch point should be just before them */
3090 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3091 return (EINVAL);
3092
3093 /* cds should be the clone (unless they are unrelated) */
3094 if (csa->cds->ds_prev != NULL &&
3095 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3096 csa->ohds->ds_object !=
3097 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3098 return (EINVAL);
3099
3100 /* the clone should be a child of the origin */
3101 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3102 return (EINVAL);
3103
3104 /* ohds shouldn't be modified unless 'force' */
3105 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3106 return (ETXTBSY);
3107
3108 /* adjust amount of any unconsumed refreservation */
3109 csa->unused_refres_delta =
3110 (int64_t)MIN(csa->ohds->ds_reserved,
3111 csa->ohds->ds_phys->ds_unique_bytes) -
3112 (int64_t)MIN(csa->ohds->ds_reserved,
3113 csa->cds->ds_phys->ds_unique_bytes);
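/*
 * Illustration (hypothetical figures): with a 10G refreservation
 * on ohds, 3G unique on ohds and 8G unique on the clone, the
 * unconsumed portion shrinks from 7G to 2G after the swap, so
 * unused_refres_delta = MIN(10G, 3G) - MIN(10G, 8G) = -5G.
 */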
3114
3115 if (csa->unused_refres_delta > 0 &&
3116 csa->unused_refres_delta >
3117 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3118 return (ENOSPC);
3119
3120 if (csa->ohds->ds_quota != 0 &&
3121 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3122 return (EDQUOT);
3123
3124 return (0);
3125 }
3126
3127 /* ARGSUSED */
3128 static void
3129 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3130 {
3131 struct cloneswaparg *csa = arg1;
3132 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3133
3134 ASSERT(csa->cds->ds_reserved == 0);
3135 ASSERT(csa->ohds->ds_quota == 0 ||
3136 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3137
3138 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3139 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3140
3141 if (csa->cds->ds_objset != NULL) {
3142 dmu_objset_evict(csa->cds->ds_objset);
3143 csa->cds->ds_objset = NULL;
3144 }
3145
3146 if (csa->ohds->ds_objset != NULL) {
3147 dmu_objset_evict(csa->ohds->ds_objset);
3148 csa->ohds->ds_objset = NULL;
3149 }
3150
3151 /*
3152 * Reset origin's unique bytes, if it exists.
3153 */
3154 if (csa->cds->ds_prev) {
3155 dsl_dataset_t *origin = csa->cds->ds_prev;
3156 uint64_t comp, uncomp;
3157
3158 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3159 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3160 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3161 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3162 }
3163
3164 /* swap blkptrs */
3165 {
3166 blkptr_t tmp;
3167 tmp = csa->ohds->ds_phys->ds_bp;
3168 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3169 csa->cds->ds_phys->ds_bp = tmp;
3170 }
3171
3172 /* set dd_*_bytes */
3173 {
3174 int64_t dused, dcomp, duncomp;
3175 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3176 uint64_t odl_used, odl_comp, odl_uncomp;
3177
3178 ASSERT3U(csa->cds->ds_dir->dd_phys->
3179 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3180
3181 dsl_deadlist_space(&csa->cds->ds_deadlist,
3182 &cdl_used, &cdl_comp, &cdl_uncomp);
3183 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3184 &odl_used, &odl_comp, &odl_uncomp);
3185
3186 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3187 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3188 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3189 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3190 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3191 cdl_uncomp -
3192 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3193
3194 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3195 dused, dcomp, duncomp, tx);
3196 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3197 -dused, -dcomp, -duncomp, tx);
3198
3199 /*
3200 * The difference in the space used by snapshots is the
3201 * difference in snapshot space due to the head's
3202 * deadlist (since that's the only thing that's
3203 * changing that affects the snapused).
3204 */
3205 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3206 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3207 &cdl_used, &cdl_comp, &cdl_uncomp);
3208 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3209 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3210 &odl_used, &odl_comp, &odl_uncomp);
3211 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3212 DD_USED_HEAD, DD_USED_SNAP, tx);
3213 }
3214
3215 /* swap ds_*_bytes */
3216 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3217 csa->cds->ds_phys->ds_used_bytes);
3218 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3219 csa->cds->ds_phys->ds_compressed_bytes);
3220 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3221 csa->cds->ds_phys->ds_uncompressed_bytes);
3222 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3223 csa->cds->ds_phys->ds_unique_bytes);
3224
3225 /* apply any parent delta for change in unconsumed refreservation */
3226 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3227 csa->unused_refres_delta, 0, 0, tx);
3228
3229 /*
3230 * Swap deadlists.
3231 */
3232 dsl_deadlist_close(&csa->cds->ds_deadlist);
3233 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3234 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3235 csa->cds->ds_phys->ds_deadlist_obj);
3236 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3237 csa->cds->ds_phys->ds_deadlist_obj);
3238 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3239 csa->ohds->ds_phys->ds_deadlist_obj);
3240
3241 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3242 }
3243
3244 /*
3245 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3246 * recv" into an existing fs to swizzle the file system to the new
3247 * version, and by "zfs rollback". Can also be used to swap two
3248 * independent head datasets if neither has any snapshots.
3249 */
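/*
 * Usage sketch (hypothetical names; error handling elided). Both
 * datasets must already be owned by the caller, e.g.:
 *
 *	dsl_dataset_t *clone, *head;
 *	VERIFY(0 == dsl_dataset_own("pool/fs/%recv", B_TRUE,
 *	    FTAG, &clone));
 *	VERIFY(0 == dsl_dataset_own("pool/fs", B_TRUE, FTAG, &head));
 *	error = dsl_dataset_clone_swap(clone, head, B_FALSE);
 *	dsl_dataset_disown(head, FTAG);
 *	dsl_dataset_disown(clone, FTAG);
 */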
3250 int
3251 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3252 boolean_t force)
3253 {
3254 struct cloneswaparg csa;
3255 int error;
3256
3257 ASSERT(clone->ds_owner);
3258 ASSERT(origin_head->ds_owner);
3259 retry:
3260 /*
3261 * Need exclusive access for the swap. If we're swapping these
3262 * datasets back after an error, we already hold the locks.
3263 */
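/*
 * The rw_tryenter() dance below avoids a deadlock with another
 * thread taking the same two locks in the opposite order: if the
 * second lock cannot be acquired without blocking, both locks are
 * dropped and reacquired in the reverse order before retrying.
 */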
3264 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3265 rw_enter(&clone->ds_rwlock, RW_WRITER);
3266 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3267 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3268 rw_exit(&clone->ds_rwlock);
3269 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3270 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3271 rw_exit(&origin_head->ds_rwlock);
3272 goto retry;
3273 }
3274 }
3275 csa.cds = clone;
3276 csa.ohds = origin_head;
3277 csa.force = force;
3278 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3279 dsl_dataset_clone_swap_check,
3280 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3281 return (error);
3282 }
3283
3284 /*
3285 * Given a pool name and a dataset object number in that pool,
3286 * return the name of that dataset.
3287 */
3288 int
3289 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3290 {
3291 spa_t *spa;
3292 dsl_pool_t *dp;
3293 dsl_dataset_t *ds;
3294 int error;
3295
3296 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3297 return (error);
3298 dp = spa_get_dsl(spa);
3299 rw_enter(&dp->dp_config_rwlock, RW_READER);
3300 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3301 dsl_dataset_name(ds, buf);
3302 dsl_dataset_rele(ds, FTAG);
3303 }
3304 rw_exit(&dp->dp_config_rwlock);
3305 spa_close(spa, FTAG);
3306
3307 return (error);
3308 }
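/*
 * Example (hypothetical values): given pool "tank" and dataset
 * object number 42,
 *
 *	char buf[MAXNAMELEN];
 *	error = dsl_dsobj_to_dsname("tank", 42, buf);
 *
 * fills buf with a name such as "tank/home/fs@snap" on success.
 */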
3309
3310 int
3311 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3312 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3313 {
3314 int error = 0;
3315
3316 ASSERT3S(asize, >, 0);
3317
3318 /*
3319 * *ref_rsrv is the portion of asize that will come from any
3320 * unconsumed refreservation space.
3321 */
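/*
 * Illustration (hypothetical figures): with 10G reserved, 4G
 * unique, and a 2G write (nothing else in flight), parent_delta()
 * is 0 because MAX(unique, reserved) stays at 10G, so the entire
 * 2G of asize is satisfied from the unconsumed refreservation
 * and *ref_rsrv = 2G.
 */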
3322 *ref_rsrv = 0;
3323
3324 mutex_enter(&ds->ds_lock);
3325 /*
3326 * Make a space adjustment for reserved bytes.
3327 */
3328 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3329 ASSERT3U(*used, >=,
3330 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3331 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3332 *ref_rsrv =
3333 asize - MIN(asize, parent_delta(ds, asize + inflight));
3334 }
3335
3336 if (!check_quota || ds->ds_quota == 0) {
3337 mutex_exit(&ds->ds_lock);
3338 return (0);
3339 }
3340 /*
3341 * If they are requesting more space, and our current estimate
3342 * is over quota, they get to try again unless the actual
3343 * on-disk usage is over quota and there are no pending changes (which
3344 * may free up space for us).
3345 */
3346 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3347 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3348 error = ERESTART;
3349 else
3350 error = EDQUOT;
3351
3352 DMU_TX_STAT_BUMP(dmu_tx_quota);
3353 }
3354 mutex_exit(&ds->ds_lock);
3355
3356 return (error);
3357 }
3358
3359 /* ARGSUSED */
3360 static int
3361 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3362 {
3363 dsl_dataset_t *ds = arg1;
3364 dsl_prop_setarg_t *psa = arg2;
3365 int err;
3366
3367 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3368 return (ENOTSUP);
3369
3370 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3371 return (err);
3372
3373 if (psa->psa_effective_value == 0)
3374 return (0);
3375
3376 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3377 psa->psa_effective_value < ds->ds_reserved)
3378 return (ENOSPC);
3379
3380 return (0);
3381 }
3382
3383 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3384
3385 void
3386 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3387 {
3388 dsl_dataset_t *ds = arg1;
3389 dsl_prop_setarg_t *psa = arg2;
3390 uint64_t effective_value = psa->psa_effective_value;
3391
3392 dsl_prop_set_sync(ds, psa, tx);
3393 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3394
3395 if (ds->ds_quota != effective_value) {
3396 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3397 ds->ds_quota = effective_value;
3398
3399 spa_history_log_internal(LOG_DS_REFQUOTA,
3400 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3401 (longlong_t)ds->ds_quota, ds->ds_object);
3402 }
3403 }
3404
3405 int
3406 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3407 {
3408 dsl_dataset_t *ds;
3409 dsl_prop_setarg_t psa;
3410 int err;
3411
3412 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3413
3414 err = dsl_dataset_hold(dsname, FTAG, &ds);
3415 if (err)
3416 return (err);
3417
3418 /*
3419 * If someone removes a file, then tries to set the quota, we
3420 * want to make sure the file freeing takes effect.
3421 */
3422 txg_wait_open(ds->ds_dir->dd_pool, 0);
3423
3424 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3425 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3426 ds, &psa, 0);
3427
3428 dsl_dataset_rele(ds, FTAG);
3429 return (err);
3430 }
3431
3432 static int
3433 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3434 {
3435 dsl_dataset_t *ds = arg1;
3436 dsl_prop_setarg_t *psa = arg2;
3437 uint64_t effective_value;
3438 uint64_t unique;
3439 int err;
3440
3441 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3442 SPA_VERSION_REFRESERVATION)
3443 return (ENOTSUP);
3444
3445 if (dsl_dataset_is_snapshot(ds))
3446 return (EINVAL);
3447
3448 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3449 return (err);
3450
3451 effective_value = psa->psa_effective_value;
3452
3453 /*
3454 * If we are doing the preliminary check in open context, the
3455 * space estimates may be inaccurate.
3456 */
3457 if (!dmu_tx_is_syncing(tx))
3458 return (0);
3459
3460 mutex_enter(&ds->ds_lock);
3461 if (!DS_UNIQUE_IS_ACCURATE(ds))
3462 dsl_dataset_recalc_head_uniq(ds);
3463 unique = ds->ds_phys->ds_unique_bytes;
3464 mutex_exit(&ds->ds_lock);
3465
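/*
 * Worked example (hypothetical figures): raising the
 * refreservation from 3G to 9G with 5G of unique data requires
 * MAX(5G, 9G) - MAX(5G, 3G) = 4G of additional space to be
 * available (and the new value must also fit under any refquota).
 */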
3466 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3467 uint64_t delta = MAX(unique, effective_value) -
3468 MAX(unique, ds->ds_reserved);
3469
3470 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3471 return (ENOSPC);
3472 if (ds->ds_quota > 0 &&
3473 effective_value > ds->ds_quota)
3474 return (ENOSPC);
3475 }
3476
3477 return (0);
3478 }
3479
3480 static void
3481 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3482 {
3483 dsl_dataset_t *ds = arg1;
3484 dsl_prop_setarg_t *psa = arg2;
3485 uint64_t effective_value = psa->psa_effective_value;
3486 uint64_t unique;
3487 int64_t delta;
3488
3489 dsl_prop_set_sync(ds, psa, tx);
3490 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3491
3492 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3493
3494 mutex_enter(&ds->ds_dir->dd_lock);
3495 mutex_enter(&ds->ds_lock);
3496 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3497 unique = ds->ds_phys->ds_unique_bytes;
3498 delta = MAX(0, (int64_t)(effective_value - unique)) -
3499 MAX(0, (int64_t)(ds->ds_reserved - unique));
3500 ds->ds_reserved = effective_value;
3501 mutex_exit(&ds->ds_lock);
3502
3503 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3504 mutex_exit(&ds->ds_dir->dd_lock);
3505
3506 spa_history_log_internal(LOG_DS_REFRESERV,
3507 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3508 (longlong_t)effective_value, ds->ds_object);
3509 }
3510
3511 int
3512 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3513 uint64_t reservation)
3514 {
3515 dsl_dataset_t *ds;
3516 dsl_prop_setarg_t psa;
3517 int err;
3518
3519 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3520 &reservation);
3521
3522 err = dsl_dataset_hold(dsname, FTAG, &ds);
3523 if (err)
3524 return (err);
3525
3526 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3527 dsl_dataset_set_reservation_check,
3528 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3529
3530 dsl_dataset_rele(ds, FTAG);
3531 return (err);
3532 }
3533
3534 typedef struct zfs_hold_cleanup_arg {
3535 dsl_pool_t *dp;
3536 uint64_t dsobj;
3537 char htag[MAXNAMELEN];
3538 } zfs_hold_cleanup_arg_t;
3539
3540 static void
3541 dsl_dataset_user_release_onexit(void *arg)
3542 {
3543 zfs_hold_cleanup_arg_t *ca = arg;
3544
3545 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3546 B_TRUE);
3547 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3548 }
3549
3550 void
3551 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3552 minor_t minor)
3553 {
3554 zfs_hold_cleanup_arg_t *ca;
3555
3556 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3557 ca->dp = ds->ds_dir->dd_pool;
3558 ca->dsobj = ds->ds_object;
3559 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3560 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3561 dsl_dataset_user_release_onexit, ca, NULL));
3562 }
3563
3564 /*
3565 * If you add new checks here, you may need to add
3566 * additional checks to the "temporary" case in
3567 * snapshot_check() in dmu_objset.c.
3568 */
3569 static int
3570 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3571 {
3572 dsl_dataset_t *ds = arg1;
3573 struct dsl_ds_holdarg *ha = arg2;
3574 char *htag = ha->htag;
3575 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3576 int error = 0;
3577
3578 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3579 return (ENOTSUP);
3580
3581 if (!dsl_dataset_is_snapshot(ds))
3582 return (EINVAL);
3583
3584 /* tags must be unique */
3585 mutex_enter(&ds->ds_lock);
3586 if (ds->ds_phys->ds_userrefs_obj) {
3587 error = zap_contains(mos, ds->ds_phys->ds_userrefs_obj,
3588 htag);
3589 if (error == 0)
3590 error = EEXIST;
3591 else if (error == ENOENT)
3592 error = 0;
3593 }
3594 mutex_exit(&ds->ds_lock);
3595
3596 if (error == 0 && ha->temphold &&
3597 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3598 error = E2BIG;
3599
3600 return (error);
3601 }
3602
3603 void
3604 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3605 {
3606 dsl_dataset_t *ds = arg1;
3607 struct dsl_ds_holdarg *ha = arg2;
3608 char *htag = ha->htag;
3609 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3610 objset_t *mos = dp->dp_meta_objset;
3611 uint64_t now = gethrestime_sec();
3612 uint64_t zapobj;
3613
3614 mutex_enter(&ds->ds_lock);
3615 if (ds->ds_phys->ds_userrefs_obj == 0) {
3616 /*
3617 * This is the first user hold for this dataset. Create
3618 * the userrefs zap object.
3619 */
3620 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3621 zapobj = ds->ds_phys->ds_userrefs_obj =
3622 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3623 } else {
3624 zapobj = ds->ds_phys->ds_userrefs_obj;
3625 }
3626 ds->ds_userrefs++;
3627 mutex_exit(&ds->ds_lock);
3628
3629 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3630
3631 if (ha->temphold) {
3632 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3633 htag, &now, tx));
3634 }
3635
3636 spa_history_log_internal(LOG_DS_USER_HOLD,
3637 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3638 (int)ha->temphold, ds->ds_object);
3639 }
3640
3641 static int
3642 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3643 {
3644 struct dsl_ds_holdarg *ha = arg;
3645 dsl_dataset_t *ds;
3646 int error;
3647 char *name;
3648
3649 /* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3650 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3651 error = dsl_dataset_hold(name, ha->dstg, &ds);
3652 strfree(name);
3653 if (error == 0) {
3654 ha->gotone = B_TRUE;
3655 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3656 dsl_dataset_user_hold_sync, ds, ha, 0);
3657 } else if (error == ENOENT && ha->recursive) {
3658 error = 0;
3659 } else {
3660 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3661 }
3662 return (error);
3663 }
3664
3665 int
3666 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3667 boolean_t temphold)
3668 {
3669 struct dsl_ds_holdarg *ha;
3670 int error;
3671
3672 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3673 ha->htag = htag;
3674 ha->temphold = temphold;
3675 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3676 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3677 ds, ha, 0);
3678 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3679
3680 return (error);
3681 }
3682
3683 int
3684 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3685 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3686 {
3687 struct dsl_ds_holdarg *ha;
3688 dsl_sync_task_t *dst;
3689 spa_t *spa;
3690 int error;
3691 minor_t minor = 0;
3692
3693 if (cleanup_fd != -1) {
3694 /* Currently we only support cleanup-on-exit of tempholds. */
3695 if (!temphold)
3696 return (EINVAL);
3697 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3698 if (error)
3699 return (error);
3700 }
3701
3702 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3703
3704 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3705
3706 error = spa_open(dsname, &spa, FTAG);
3707 if (error) {
3708 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3709 if (cleanup_fd != -1)
3710 zfs_onexit_fd_rele(cleanup_fd);
3711 return (error);
3712 }
3713
3714 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3715 ha->htag = htag;
3716 ha->snapname = snapname;
3717 ha->recursive = recursive;
3718 ha->temphold = temphold;
3719
3720 if (recursive) {
3721 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3722 ha, DS_FIND_CHILDREN);
3723 } else {
3724 error = dsl_dataset_user_hold_one(dsname, ha);
3725 }
3726 if (error == 0)
3727 error = dsl_sync_task_group_wait(ha->dstg);
3728
3729 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3730 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3731 dsl_dataset_t *ds = dst->dst_arg1;
3732
3733 if (dst->dst_err) {
3734 dsl_dataset_name(ds, ha->failed);
3735 *strchr(ha->failed, '@') = '\0';
3736 } else if (error == 0 && minor != 0 && temphold) {
3737 /*
3738 * If this hold is to be released upon process exit,
3739 * register that action now.
3740 */
3741 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3742 }
3743 dsl_dataset_rele(ds, ha->dstg);
3744 }
3745
3746 if (error == 0 && recursive && !ha->gotone)
3747 error = ENOENT;
3748
3749 if (error)
3750 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3751
3752 dsl_sync_task_group_destroy(ha->dstg);
3753
3754 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3755 spa_close(spa, FTAG);
3756 if (cleanup_fd != -1)
3757 zfs_onexit_fd_rele(cleanup_fd);
3758 return (error);
3759 }
3760
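/*
 * Illustrative sketch (not part of the original source): how a caller
 * might take a temporary, automatically cleaned-up hold.  The pool,
 * snapshot, tag, and cleanup_fd below are hypothetical.
 */
#if 0
static int
example_temphold(int cleanup_fd)
{
	/* dsname is in/out: on failure it receives the offending name */
	char dsname[MAXPATHLEN] = "tank/fs";

	/* non-recursive temphold, released when cleanup_fd is closed */
	return (dsl_dataset_user_hold(dsname, "snap", ".example-tag",
	    B_FALSE, B_TRUE, cleanup_fd));
}
#endif
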
3761 struct dsl_ds_releasearg {
3762 dsl_dataset_t *ds;
3763 const char *htag;
3764 boolean_t own; /* do we own or just hold ds? */
3765 };
3766
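/*
 * Determine whether releasing htag could trigger a deferred destroy of
 * ds: true when this is the sole user hold (ds_userrefs == 1), the
 * snapshot has no clones (ds_num_children == 1), and destruction was
 * deferred.  Returns ESRCH if the tag is not held on ds.
 */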
3767 static int
3768 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3769 boolean_t *might_destroy)
3770 {
3771 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3772 uint64_t zapobj;
3773 uint64_t tmp;
3774 int error;
3775
3776 *might_destroy = B_FALSE;
3777
3778 mutex_enter(&ds->ds_lock);
3779 zapobj = ds->ds_phys->ds_userrefs_obj;
3780 if (zapobj == 0) {
3781 /* The tag can't possibly exist */
3782 mutex_exit(&ds->ds_lock);
3783 return (ESRCH);
3784 }
3785
3786 /* Make sure the tag exists */
3787 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3788 if (error) {
3789 mutex_exit(&ds->ds_lock);
3790 if (error == ENOENT)
3791 error = ESRCH;
3792 return (error);
3793 }
3794
3795 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3796 DS_IS_DEFER_DESTROY(ds))
3797 *might_destroy = B_TRUE;
3798
3799 mutex_exit(&ds->ds_lock);
3800 return (0);
3801 }
3802
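/*
 * Sync task check for a user release.  If the release could destroy
 * the snapshot, the destroy checks must pass as well; in syncing
 * context that additionally requires that we already own the dataset
 * (ra->own), otherwise EBUSY is returned and the caller retries.
 */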
3803 static int
3804 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3805 {
3806 struct dsl_ds_releasearg *ra = arg1;
3807 dsl_dataset_t *ds = ra->ds;
3808 boolean_t might_destroy;
3809 int error;
3810
3811 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3812 return (ENOTSUP);
3813
3814 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3815 if (error)
3816 return (error);
3817
3818 if (might_destroy) {
3819 struct dsl_ds_destroyarg dsda = {0};
3820
3821 if (dmu_tx_is_syncing(tx)) {
3822 /*
3823 * If we're not prepared to remove the snapshot,
3824 * we can't allow the release to happen right now.
3825 */
3826 if (!ra->own)
3827 return (EBUSY);
3828 }
3829 dsda.ds = ds;
3830 dsda.releasing = B_TRUE;
3831 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3832 }
3833
3834 return (0);
3835 }
3836
3837 static void
3838 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3839 {
3840 struct dsl_ds_releasearg *ra = arg1;
3841 dsl_dataset_t *ds = ra->ds;
3842 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3843 objset_t *mos = dp->dp_meta_objset;
3844 uint64_t zapobj;
3845 uint64_t dsobj = ds->ds_object;
3846 uint64_t refs;
3847 int error;
3848
3849 mutex_enter(&ds->ds_lock);
3850 ds->ds_userrefs--;
3851 refs = ds->ds_userrefs;
3852 mutex_exit(&ds->ds_lock);
3853 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3854 VERIFY(error == 0 || error == ENOENT);
3855 zapobj = ds->ds_phys->ds_userrefs_obj;
3856 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3857 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3858 DS_IS_DEFER_DESTROY(ds)) {
3859 struct dsl_ds_destroyarg dsda = {0};
3860
3861 ASSERT(ra->own);
3862 dsda.ds = ds;
3863 dsda.releasing = B_TRUE;
3864 /* We already did the destroy_check */
3865 dsl_dataset_destroy_sync(&dsda, tag, tx);
3866 }
3867
3868 spa_history_log_internal(LOG_DS_USER_RELEASE,
3869 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3870 ra->htag, (longlong_t)refs, dsobj);
3871 }
3872
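/*
 * dmu_objset_find() callback: queue a release sync task for one
 * dataset's snapshot.  If the release may trigger a deferred destroy,
 * unmount the snapshot and take ownership here, in open context, so
 * that the sync task is permitted to destroy it.
 */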
3873 static int
3874 dsl_dataset_user_release_one(const char *dsname, void *arg)
3875 {
3876 struct dsl_ds_holdarg *ha = arg;
3877 struct dsl_ds_releasearg *ra;
3878 dsl_dataset_t *ds;
3879 int error;
3880 void *dtag = ha->dstg;
3881 char *name;
3882 boolean_t own = B_FALSE;
3883 boolean_t might_destroy;
3884
3885 	/* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3886 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3887 error = dsl_dataset_hold(name, dtag, &ds);
3888 strfree(name);
3889 if (error == ENOENT && ha->recursive)
3890 return (0);
3891 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3892 if (error)
3893 return (error);
3894
3895 ha->gotone = B_TRUE;
3896
3897 ASSERT(dsl_dataset_is_snapshot(ds));
3898
3899 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3900 if (error) {
3901 dsl_dataset_rele(ds, dtag);
3902 return (error);
3903 }
3904
3905 if (might_destroy) {
3906 #ifdef _KERNEL
3907 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3908 error = zfs_unmount_snap(name, NULL);
3909 strfree(name);
3910 if (error) {
3911 dsl_dataset_rele(ds, dtag);
3912 return (error);
3913 }
3914 #endif
3915 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3916 dsl_dataset_rele(ds, dtag);
3917 return (EBUSY);
3918 } else {
3919 own = B_TRUE;
3920 dsl_dataset_make_exclusive(ds, dtag);
3921 }
3922 }
3923
3924 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3925 ra->ds = ds;
3926 ra->htag = ha->htag;
3927 ra->own = own;
3928 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3929 dsl_dataset_user_release_sync, ra, dtag, 0);
3930
3931 return (0);
3932 }
3933
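/*
 * Release the user hold htag from dsname@snapname (or, if recursive,
 * from that snapshot of every descendant filesystem).  Transient EBUSY
 * races with destroy are retried from the top; see the comment near
 * the end of this function.
 */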
3934 int
3935 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3936 boolean_t recursive)
3937 {
3938 struct dsl_ds_holdarg *ha;
3939 dsl_sync_task_t *dst;
3940 spa_t *spa;
3941 int error;
3942
3943 top:
3944 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3945
3946 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3947
3948 error = spa_open(dsname, &spa, FTAG);
3949 if (error) {
3950 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3951 return (error);
3952 }
3953
3954 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3955 ha->htag = htag;
3956 ha->snapname = snapname;
3957 ha->recursive = recursive;
3958 if (recursive) {
3959 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3960 ha, DS_FIND_CHILDREN);
3961 } else {
3962 error = dsl_dataset_user_release_one(dsname, ha);
3963 }
3964 if (error == 0)
3965 error = dsl_sync_task_group_wait(ha->dstg);
3966
3967 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3968 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3969 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3970 dsl_dataset_t *ds = ra->ds;
3971
3972 if (dst->dst_err)
3973 dsl_dataset_name(ds, ha->failed);
3974
3975 if (ra->own)
3976 dsl_dataset_disown(ds, ha->dstg);
3977 else
3978 dsl_dataset_rele(ds, ha->dstg);
3979
3980 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3981 }
3982
3983 if (error == 0 && recursive && !ha->gotone)
3984 error = ENOENT;
3985
3986 if (error && error != EBUSY)
3987 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3988
3989 dsl_sync_task_group_destroy(ha->dstg);
3990 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3991 spa_close(spa, FTAG);
3992
3993 /*
3994 * We can get EBUSY if we were racing with deferred destroy and
3995 * dsl_dataset_user_release_check() hadn't done the necessary
3996 * open context setup. We can also get EBUSY if we're racing
3997 * with destroy and that thread is the ds_owner. Either way
3998 * the busy condition should be transient, and we should retry
3999 * the release operation.
4000 */
4001 if (error == EBUSY)
4002 goto top;
4003
4004 return (error);
4005 }
4006
4007 /*
4008 * Called at spa_load time (with retry == B_FALSE) to release a stale
4009 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4010 */
4011 int
4012 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4013 boolean_t retry)
4014 {
4015 dsl_dataset_t *ds;
4016 char *snap;
4017 char *name;
4018 int namelen;
4019 int error;
4020
4021 do {
4022 rw_enter(&dp->dp_config_rwlock, RW_READER);
4023 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4024 rw_exit(&dp->dp_config_rwlock);
4025 if (error)
4026 return (error);
4027 		namelen = dsl_dataset_namelen(ds) + 1;
4028 name = kmem_alloc(namelen, KM_SLEEP);
4029 dsl_dataset_name(ds, name);
4030 dsl_dataset_rele(ds, FTAG);
4031
4032 snap = strchr(name, '@');
4033 *snap = '\0';
4034 ++snap;
4035 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4036 kmem_free(name, namelen);
4037
4038 /*
4039 * The object can't have been destroyed because we have a hold,
4040 * but it might have been renamed, resulting in ENOENT. Retry
4041 * if we've been requested to do so.
4042 *
4043 * It would be nice if we could use the dsobj all the way
4044 * through and avoid ENOENT entirely. But we might need to
4045 * unmount the snapshot, and there's currently no way to lookup
4046 * a vfsp using a ZFS object id.
4047 */
4048 } while ((error == ENOENT) && retry);
4049
4050 return (error);
4051 }
4052
4053 int
4054 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4055 {
4056 dsl_dataset_t *ds;
4057 int err;
4058
4059 err = dsl_dataset_hold(dsname, FTAG, &ds);
4060 if (err)
4061 return (err);
4062
4063 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4064 if (ds->ds_phys->ds_userrefs_obj != 0) {
4065 zap_attribute_t *za;
4066 zap_cursor_t zc;
4067
4068 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4069 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4070 ds->ds_phys->ds_userrefs_obj);
4071 zap_cursor_retrieve(&zc, za) == 0;
4072 zap_cursor_advance(&zc)) {
4073 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4074 za->za_first_integer));
4075 }
4076 zap_cursor_fini(&zc);
4077 kmem_free(za, sizeof (zap_attribute_t));
4078 }
4079 dsl_dataset_rele(ds, FTAG);
4080 return (0);
4081 }
4082
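/*
 * Illustrative sketch (not part of the original source): consuming the
 * nvlist built by dsl_dataset_get_holds().  Each pair maps a hold tag
 * to the uint64 timestamp recorded when the hold was taken.
 */
#if 0
static void
example_print_holds(const char *snapname)
{
	nvlist_t *nvl;
	nvpair_t *pair;
	uint64_t when;

	if (dsl_dataset_get_holds(snapname, &nvl) != 0)
		return;
	for (pair = nvlist_next_nvpair(nvl, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(nvl, pair)) {
		VERIFY(0 == nvpair_value_uint64(pair, &when));
		dprintf("hold %s since %llu\n", nvpair_name(pair),
		    (u_longlong_t)when);
	}
	nvlist_free(nvl);
}
#endif
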
4083 /*
4084 * Note: this function is used as the callback for dmu_objset_find().
4085 * We always return 0 so that we continue to find and process
4086 * inconsistent datasets, even if we encounter an error while
4087 * processing one of them.
4088 */
4089 /* ARGSUSED */
4090 int
4091 dsl_destroy_inconsistent(const char *dsname, void *arg)
4092 {
4093 dsl_dataset_t *ds;
4094
4095 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4096 if (DS_IS_INCONSISTENT(ds))
4097 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4098 else
4099 dsl_dataset_disown(ds, FTAG);
4100 }
4101 return (0);
4102 }
4103
4104
4105 /*
4106 * Return (in *usedp) the amount of space written in new that is not
4107 * present in oldsnap. New may be a snapshot or the head. Old must be
4108 * a snapshot before new, in new's filesystem (or its origin); if not,
4109 * fail and return EINVAL.
4110 *
4111 * The written space has two components:  First, ignoring freed space,
4112 * we compute written space as new's used space minus old's used space.
4113 * Next, we add back the space freed between the two snapshots, since
4114 * it reduced new's used space relative to old's.  Specifically, this
4115 * is the space born before old->ds_creation_txg and freed before new
4116 * (i.e. on new's deadlist or a previous deadlist).
4117 *
4118 * space freed [---------------------]
4119 * snapshots ---O-------O--------O-------O------
4120 * oldsnap new
4121 */
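/*
 * Worked example (illustrative figures): if oldsnap referenced 100M,
 * new references 150M, and 30M of the data present at oldsnap was
 * freed before new, then written = (150M - 100M) + 30M = 80M; the
 * freed 30M masks an equal amount of newly written data in the plain
 * used-space difference, so it must be added back.
 */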
4122 int
4123 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4124 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4125 {
4126 int err = 0;
4127 uint64_t snapobj;
4128 dsl_pool_t *dp = new->ds_dir->dd_pool;
4129
4130 *usedp = 0;
4131 *usedp += new->ds_phys->ds_used_bytes;
4132 *usedp -= oldsnap->ds_phys->ds_used_bytes;
4133
4134 *compp = 0;
4135 *compp += new->ds_phys->ds_compressed_bytes;
4136 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4137
4138 *uncompp = 0;
4139 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4140 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4141
4142 rw_enter(&dp->dp_config_rwlock, RW_READER);
4143 snapobj = new->ds_object;
4144 while (snapobj != oldsnap->ds_object) {
4145 dsl_dataset_t *snap;
4146 uint64_t used, comp, uncomp;
4147
4148 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4149 if (err != 0)
4150 break;
4151
4152 if (snap->ds_phys->ds_prev_snap_txg ==
4153 oldsnap->ds_phys->ds_creation_txg) {
4154 /*
4155 			 * The blocks in the deadlist cannot be born after
4156 * ds_prev_snap_txg, so get the whole deadlist space,
4157 * which is more efficient (especially for old-format
4158 * deadlists). Unfortunately the deadlist code
4159 * doesn't have enough information to make this
4160 * optimization itself.
4161 */
4162 dsl_deadlist_space(&snap->ds_deadlist,
4163 &used, &comp, &uncomp);
4164 } else {
4165 dsl_deadlist_space_range(&snap->ds_deadlist,
4166 0, oldsnap->ds_phys->ds_creation_txg,
4167 &used, &comp, &uncomp);
4168 }
4169 *usedp += used;
4170 *compp += comp;
4171 *uncompp += uncomp;
4172
4173 /*
4174 * If we get to the beginning of the chain of snapshots
4175 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4176 		 * was not a snapshot of (or before) new.
4177 */
4178 snapobj = snap->ds_phys->ds_prev_snap_obj;
4179 dsl_dataset_rele(snap, FTAG);
4180 if (snapobj == 0) {
4181 err = EINVAL;
4182 break;
4183 }
4184
4185 }
4186 rw_exit(&dp->dp_config_rwlock);
4187 return (err);
4188 }
4189
4190 /*
4191 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4192 * lastsnap, and all snapshots in between are deleted.
4193 *
4194 * blocks that would be freed [---------------------------]
4195 * snapshots ---O-------O--------O-------O--------O
4196 * firstsnap lastsnap
4197 *
4198 * This is the set of blocks that were born after the snap before firstsnap
4199 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4200 * last snap (i.e. on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4201 * We calculate this by iterating over the relevant deadlists (from the snap
4202 * after lastsnap, backward to the snap after firstsnap), summing up the
4203 * space on the deadlist that was born after the snap before firstsnap.
4204 */
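/*
 * Worked example: given snapshots @a-@b-@c-@d, destroying @b..@c frees
 * exactly the blocks born after @a that sit on @c's deadlist (died
 * between @b and @c) or on @d's deadlist (alive at @c, dead by @d);
 * anything born at or before @a is still referenced by @a and survives.
 */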
4205 int
4206 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4207 dsl_dataset_t *lastsnap,
4208 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4209 {
4210 int err = 0;
4211 uint64_t snapobj;
4212 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4213
4214 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4215 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4216
4217 /*
4218 * Check that the snapshots are in the same dsl_dir, and firstsnap
4219 * is before lastsnap.
4220 */
4221 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4222 firstsnap->ds_phys->ds_creation_txg >
4223 lastsnap->ds_phys->ds_creation_txg)
4224 return (EINVAL);
4225
4226 *usedp = *compp = *uncompp = 0;
4227
4228 rw_enter(&dp->dp_config_rwlock, RW_READER);
4229 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4230 while (snapobj != firstsnap->ds_object) {
4231 dsl_dataset_t *ds;
4232 uint64_t used, comp, uncomp;
4233
4234 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4235 if (err != 0)
4236 break;
4237
4238 dsl_deadlist_space_range(&ds->ds_deadlist,
4239 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4240 &used, &comp, &uncomp);
4241 *usedp += used;
4242 *compp += comp;
4243 *uncompp += uncomp;
4244
4245 snapobj = ds->ds_phys->ds_prev_snap_obj;
4246 ASSERT3U(snapobj, !=, 0);
4247 dsl_dataset_rele(ds, FTAG);
4248 }
4249 rw_exit(&dp->dp_config_rwlock);
4250 return (err);
4251 }
4252
4253 #if defined(_KERNEL) && defined(HAVE_SPL)
4254 EXPORT_SYMBOL(dmu_snapshots_destroy_nvl);
4255 EXPORT_SYMBOL(dsl_dataset_hold);
4256 EXPORT_SYMBOL(dsl_dataset_hold_obj);
4257 EXPORT_SYMBOL(dsl_dataset_own);
4258 EXPORT_SYMBOL(dsl_dataset_own_obj);
4259 EXPORT_SYMBOL(dsl_dataset_name);
4260 EXPORT_SYMBOL(dsl_dataset_rele);
4261 EXPORT_SYMBOL(dsl_dataset_disown);
4262 EXPORT_SYMBOL(dsl_dataset_drop_ref);
4263 EXPORT_SYMBOL(dsl_dataset_tryown);
4264 EXPORT_SYMBOL(dsl_dataset_make_exclusive);
4265 EXPORT_SYMBOL(dsl_dataset_create_sync);
4266 EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
4267 EXPORT_SYMBOL(dsl_dataset_destroy);
4268 EXPORT_SYMBOL(dsl_dataset_destroy_check);
4269 EXPORT_SYMBOL(dsl_dataset_destroy_sync);
4270 EXPORT_SYMBOL(dsl_dataset_snapshot_check);
4271 EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
4272 EXPORT_SYMBOL(dsl_dataset_rename);
4273 EXPORT_SYMBOL(dsl_dataset_promote);
4274 EXPORT_SYMBOL(dsl_dataset_clone_swap);
4275 EXPORT_SYMBOL(dsl_dataset_user_hold);
4276 EXPORT_SYMBOL(dsl_dataset_user_release);
4277 EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
4278 EXPORT_SYMBOL(dsl_dataset_get_holds);
4279 EXPORT_SYMBOL(dsl_dataset_get_blkptr);
4280 EXPORT_SYMBOL(dsl_dataset_set_blkptr);
4281 EXPORT_SYMBOL(dsl_dataset_get_spa);
4282 EXPORT_SYMBOL(dsl_dataset_modified_since_lastsnap);
4283 EXPORT_SYMBOL(dsl_dataset_space_written);
4284 EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
4285 EXPORT_SYMBOL(dsl_dataset_sync);
4286 EXPORT_SYMBOL(dsl_dataset_block_born);
4287 EXPORT_SYMBOL(dsl_dataset_block_kill);
4288 EXPORT_SYMBOL(dsl_dataset_block_freeable);
4289 EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
4290 EXPORT_SYMBOL(dsl_dataset_dirty);
4291 EXPORT_SYMBOL(dsl_dataset_stats);
4292 EXPORT_SYMBOL(dsl_dataset_fast_stat);
4293 EXPORT_SYMBOL(dsl_dataset_space);
4294 EXPORT_SYMBOL(dsl_dataset_fsid_guid);
4295 EXPORT_SYMBOL(dsl_dsobj_to_dsname);
4296 EXPORT_SYMBOL(dsl_dataset_check_quota);
4297 EXPORT_SYMBOL(dsl_dataset_set_quota);
4298 EXPORT_SYMBOL(dsl_dataset_set_quota_sync);
4299 EXPORT_SYMBOL(dsl_dataset_set_reservation);
4300 EXPORT_SYMBOL(dsl_destroy_inconsistent);
4301 #endif