module/zfs/dsl_dataset.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 by Delphix. All rights reserved.
24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 */
26
27 #include <sys/dmu_objset.h>
28 #include <sys/dsl_dataset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_prop.h>
31 #include <sys/dsl_synctask.h>
32 #include <sys/dmu_traverse.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/arc.h>
36 #include <sys/zio.h>
37 #include <sys/zap.h>
38 #include <sys/unique.h>
39 #include <sys/zfs_context.h>
40 #include <sys/zfs_ioctl.h>
41 #include <sys/spa.h>
42 #include <sys/zfs_znode.h>
43 #include <sys/zfs_onexit.h>
44 #include <sys/zvol.h>
45 #include <sys/dsl_scan.h>
46 #include <sys/dsl_deadlist.h>
47
48 static char *dsl_reaper = "the grim reaper";
49
50 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
51 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
52 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
53
54 #define SWITCH64(x, y) \
55 { \
56 uint64_t __tmp = (x); \
57 (x) = (y); \
58 (y) = __tmp; \
59 }
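/*
 * SWITCH64 swaps two uint64_t lvalues in place; it is used below in
 * process_old_deadlist() to exchange on-disk deadlist object numbers
 * between a snapshot and its successor.  Illustration (made-up values):
 *
 *	uint64_t a = 1, b = 2;
 *	SWITCH64(a, b);		(now a == 2, b == 1)
 */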
60
61 #define DS_REF_MAX (1ULL << 62)
62
63 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
64
65 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
66
67
68 /*
69 * Figure out how much of this delta should be propagated to the dsl_dir
70 * layer. If there's a refreservation, that space has already been
71 * partially accounted for in our ancestors.
72 */
73 static int64_t
74 parent_delta(dsl_dataset_t *ds, int64_t delta)
75 {
76 uint64_t old_bytes, new_bytes;
77
78 if (ds->ds_reserved == 0)
79 return (delta);
80
81 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
82 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
83
84 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
85 return (new_bytes - old_bytes);
86 }
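/*
 * Worked example (hypothetical numbers): with ds_reserved = 10M and
 * ds_unique_bytes = 8M, a delta of +1M gives old = MAX(8M, 10M) = 10M
 * and new = MAX(9M, 10M) = 10M, so parent_delta() returns 0 -- the
 * growth is still covered by the refreservation our ancestors already
 * account for.  A delta of +3M gives new = MAX(11M, 10M) = 11M, so
 * only the 1M above the reservation is propagated to the dsl_dir.
 */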
87
88 void
89 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
90 {
91 int used, compressed, uncompressed;
92 int64_t delta;
93
94 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
95 compressed = BP_GET_PSIZE(bp);
96 uncompressed = BP_GET_UCSIZE(bp);
97
98 dprintf_bp(bp, "ds=%p", ds);
99
100 ASSERT(dmu_tx_is_syncing(tx));
101 /* It could have been compressed away to nothing */
102 if (BP_IS_HOLE(bp))
103 return;
104 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
105 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
106 if (ds == NULL) {
107 /*
108 * Account for the meta-objset space in its placeholder
109 * dsl_dir.
110 */
111 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
112 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
113 used, compressed, uncompressed, tx);
114 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
115 return;
116 }
117 dmu_buf_will_dirty(ds->ds_dbuf, tx);
118
119 mutex_enter(&ds->ds_dir->dd_lock);
120 mutex_enter(&ds->ds_lock);
121 delta = parent_delta(ds, used);
122 ds->ds_phys->ds_used_bytes += used;
123 ds->ds_phys->ds_compressed_bytes += compressed;
124 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
125 ds->ds_phys->ds_unique_bytes += used;
126 mutex_exit(&ds->ds_lock);
127 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
128 compressed, uncompressed, tx);
129 dsl_dir_transfer_space(ds->ds_dir, used - delta,
130 DD_USED_REFRSRV, DD_USED_HEAD, tx);
131 mutex_exit(&ds->ds_dir->dd_lock);
132 }
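/*
 * Continuing the example above: when a 3M block is born with
 * ds_unique_bytes = 8M and ds_reserved = 10M, parent_delta() is 1M, so
 * dsl_dir_diduse_space() charges only 1M of new DD_USED_HEAD space to
 * our ancestors, and dsl_dir_transfer_space() reclassifies the other
 * 2M from DD_USED_REFRSRV to DD_USED_HEAD: space that was merely
 * reserved is now actually in use, leaving the dsl_dir totals intact.
 */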
133
134 int
135 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
136 boolean_t async)
137 {
138 int used, compressed, uncompressed;
139
140 if (BP_IS_HOLE(bp))
141 return (0);
142
143 ASSERT(dmu_tx_is_syncing(tx));
144 ASSERT(bp->blk_birth <= tx->tx_txg);
145
146 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
147 compressed = BP_GET_PSIZE(bp);
148 uncompressed = BP_GET_UCSIZE(bp);
149
150 ASSERT(used > 0);
151 if (ds == NULL) {
152 /*
153 * Account for the meta-objset space in its placeholder
154 * dataset.
155 */
156 dsl_free(tx->tx_pool, tx->tx_txg, bp);
157
158 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
159 -used, -compressed, -uncompressed, tx);
160 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
161 return (used);
162 }
163 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
164
165 ASSERT(!dsl_dataset_is_snapshot(ds));
166 dmu_buf_will_dirty(ds->ds_dbuf, tx);
167
168 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
169 int64_t delta;
170
171 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
172 dsl_free(tx->tx_pool, tx->tx_txg, bp);
173
174 mutex_enter(&ds->ds_dir->dd_lock);
175 mutex_enter(&ds->ds_lock);
176 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
177 !DS_UNIQUE_IS_ACCURATE(ds));
178 delta = parent_delta(ds, -used);
179 ds->ds_phys->ds_unique_bytes -= used;
180 mutex_exit(&ds->ds_lock);
181 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
182 delta, -compressed, -uncompressed, tx);
183 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
184 DD_USED_REFRSRV, DD_USED_HEAD, tx);
185 mutex_exit(&ds->ds_dir->dd_lock);
186 } else {
187 dprintf_bp(bp, "putting on dead list: %s", "");
188 if (async) {
189 /*
190 * We are here as part of zio's write done callback,
191 * which means we're a zio interrupt thread. We can't
192 * call dsl_deadlist_insert() now because it may block
193 * waiting for I/O. Instead, put bp on the deferred
194 * queue and let dsl_pool_sync() finish the job.
195 */
196 bplist_append(&ds->ds_pending_deadlist, bp);
197 } else {
198 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
199 }
200 ASSERT3U(ds->ds_prev->ds_object, ==,
201 ds->ds_phys->ds_prev_snap_obj);
202 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
203 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
204 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
205 ds->ds_object && bp->blk_birth >
206 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
207 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
208 mutex_enter(&ds->ds_prev->ds_lock);
209 ds->ds_prev->ds_phys->ds_unique_bytes += used;
210 mutex_exit(&ds->ds_prev->ds_lock);
211 }
212 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
213 dsl_dir_transfer_space(ds->ds_dir, used,
214 DD_USED_HEAD, DD_USED_SNAP, tx);
215 }
216 }
217 mutex_enter(&ds->ds_lock);
218 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
219 ds->ds_phys->ds_used_bytes -= used;
220 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
221 ds->ds_phys->ds_compressed_bytes -= compressed;
222 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
223 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
224 mutex_exit(&ds->ds_lock);
225
226 return (used);
227 }
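/*
 * To summarize the two paths above: a block born after the most recent
 * snapshot (blk_birth > ds_prev_snap_txg) is freed immediately and its
 * space credited back through the dsl_dir; a block still shared with a
 * snapshot is instead placed on this dataset's deadlist (directly, or
 * via ds_pending_deadlist from interrupt context) and, if it was born
 * after dd_origin_txg, its space is reclassified from DD_USED_HEAD to
 * DD_USED_SNAP.  Either way the dataset's used/compressed/uncompressed
 * totals shrink by the block's size, which is also the return value.
 */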
228
229 uint64_t
230 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
231 {
232 uint64_t trysnap = 0;
233
234 if (ds == NULL)
235 return (0);
236 /*
237 * The snapshot creation could fail, but that would cause an
238 * incorrect FALSE return, which would only result in an
239 * overestimation of the amount of space that an operation would
240 * consume, which is OK.
241 *
242 * There's also a small window where we could miss a pending
243 * snapshot, because we could set the sync task in the quiescing
244 * phase. So this should only be used as a guess.
245 */
246 if (ds->ds_trysnap_txg >
247 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
248 trysnap = ds->ds_trysnap_txg;
249 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
250 }
251
252 boolean_t
253 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
254 uint64_t blk_birth)
255 {
256 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
257 return (B_FALSE);
258
259 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
260
261 return (B_TRUE);
262 }
263
264 /* ARGSUSED */
265 static void
266 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
267 {
268 dsl_dataset_t *ds = dsv;
269
270 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
271
272 unique_remove(ds->ds_fsid_guid);
273
274 if (ds->ds_objset != NULL)
275 dmu_objset_evict(ds->ds_objset);
276
277 if (ds->ds_prev) {
278 dsl_dataset_drop_ref(ds->ds_prev, ds);
279 ds->ds_prev = NULL;
280 }
281
282 bplist_destroy(&ds->ds_pending_deadlist);
283 if (db != NULL) {
284 dsl_deadlist_close(&ds->ds_deadlist);
285 } else {
286 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
287 ASSERT(!ds->ds_deadlist.dl_oldfmt);
288 }
289 if (ds->ds_dir)
290 dsl_dir_close(ds->ds_dir, ds);
291
292 ASSERT(!list_link_active(&ds->ds_synced_link));
293
294 mutex_destroy(&ds->ds_lock);
295 mutex_destroy(&ds->ds_recvlock);
296 mutex_destroy(&ds->ds_opening_lock);
297 rw_destroy(&ds->ds_rwlock);
298 cv_destroy(&ds->ds_exclusive_cv);
299
300 kmem_free(ds, sizeof (dsl_dataset_t));
301 }
302
303 static int
304 dsl_dataset_get_snapname(dsl_dataset_t *ds)
305 {
306 dsl_dataset_phys_t *headphys;
307 int err;
308 dmu_buf_t *headdbuf;
309 dsl_pool_t *dp = ds->ds_dir->dd_pool;
310 objset_t *mos = dp->dp_meta_objset;
311
312 if (ds->ds_snapname[0])
313 return (0);
314 if (ds->ds_phys->ds_next_snap_obj == 0)
315 return (0);
316
317 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
318 FTAG, &headdbuf);
319 if (err)
320 return (err);
321 headphys = headdbuf->db_data;
322 err = zap_value_search(dp->dp_meta_objset,
323 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
324 dmu_buf_rele(headdbuf, FTAG);
325 return (err);
326 }
327
328 static int
329 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
330 {
331 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
332 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
333 matchtype_t mt;
334 int err;
335
336 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
337 mt = MT_FIRST;
338 else
339 mt = MT_EXACT;
340
341 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
342 value, mt, NULL, 0, NULL);
343 if (err == ENOTSUP && mt == MT_FIRST)
344 err = zap_lookup(mos, snapobj, name, 8, 1, value);
345 return (err);
346 }
347
348 static int
349 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
350 {
351 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
352 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
353 matchtype_t mt;
354 int err;
355
356 dsl_dir_snap_cmtime_update(ds->ds_dir);
357
358 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
359 mt = MT_FIRST;
360 else
361 mt = MT_EXACT;
362
363 err = zap_remove_norm(mos, snapobj, name, mt, tx);
364 if (err == ENOTSUP && mt == MT_FIRST)
365 err = zap_remove(mos, snapobj, name, tx);
366 return (err);
367 }
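/*
 * Both ZAP operations above use the same matchtype fallback: on a
 * case-insensitive dataset (DS_FLAG_CI_DATASET) we ask the ZAP for the
 * first normalized match (MT_FIRST); if the ZAP turns out not to
 * support normalization (ENOTSUP), we retry as a plain exact lookup.
 */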
368
369 static int
370 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
371 dsl_dataset_t **dsp)
372 {
373 objset_t *mos = dp->dp_meta_objset;
374 dmu_buf_t *dbuf;
375 dsl_dataset_t *ds;
376 int err;
377 dmu_object_info_t doi;
378
379 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
380 dsl_pool_sync_context(dp));
381
382 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
383 if (err)
384 return (err);
385
386 /* Make sure dsobj has the correct object type. */
387 dmu_object_info_from_db(dbuf, &doi);
388 if (doi.doi_type != DMU_OT_DSL_DATASET) {
389 dmu_buf_rele(dbuf, tag); /* don't leak the bonus hold */
return (EINVAL);
}
390
391 ds = dmu_buf_get_user(dbuf);
392 if (ds == NULL) {
393 dsl_dataset_t *winner = NULL;
394
395 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_PUSHPAGE);
396 ds->ds_dbuf = dbuf;
397 ds->ds_object = dsobj;
398 ds->ds_phys = dbuf->db_data;
399 list_link_init(&ds->ds_synced_link);
400
401 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
402 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
403 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
404 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
405
406 rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
407 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
408
409 bplist_create(&ds->ds_pending_deadlist);
410 dsl_deadlist_open(&ds->ds_deadlist,
411 mos, ds->ds_phys->ds_deadlist_obj);
412
413 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
414 offsetof(dmu_sendarg_t, dsa_link));
415
416 if (err == 0) {
417 err = dsl_dir_open_obj(dp,
418 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
419 }
420 if (err) {
421 mutex_destroy(&ds->ds_lock);
422 mutex_destroy(&ds->ds_recvlock);
423 mutex_destroy(&ds->ds_opening_lock);
424 rw_destroy(&ds->ds_rwlock);
425 cv_destroy(&ds->ds_exclusive_cv);
426 bplist_destroy(&ds->ds_pending_deadlist);
427 dsl_deadlist_close(&ds->ds_deadlist);
428 kmem_free(ds, sizeof (dsl_dataset_t));
429 dmu_buf_rele(dbuf, tag);
430 return (err);
431 }
432
433 if (!dsl_dataset_is_snapshot(ds)) {
434 ds->ds_snapname[0] = '\0';
435 if (ds->ds_phys->ds_prev_snap_obj) {
436 err = dsl_dataset_get_ref(dp,
437 ds->ds_phys->ds_prev_snap_obj,
438 ds, &ds->ds_prev);
439 }
440 } else {
441 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
442 err = dsl_dataset_get_snapname(ds);
443 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
444 err = zap_count(
445 ds->ds_dir->dd_pool->dp_meta_objset,
446 ds->ds_phys->ds_userrefs_obj,
447 &ds->ds_userrefs);
448 }
449 }
450
451 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
452 /*
453 * In sync context, we're called with either no lock
454 * or with the write lock. If we're not syncing,
455 * we're always called with the read lock held.
456 */
457 boolean_t need_lock =
458 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
459 dsl_pool_sync_context(dp);
460
461 if (need_lock)
462 rw_enter(&dp->dp_config_rwlock, RW_READER);
463
464 err = dsl_prop_get_ds(ds,
465 "refreservation", sizeof (uint64_t), 1,
466 &ds->ds_reserved, NULL);
467 if (err == 0) {
468 err = dsl_prop_get_ds(ds,
469 "refquota", sizeof (uint64_t), 1,
470 &ds->ds_quota, NULL);
471 }
472
473 if (need_lock)
474 rw_exit(&dp->dp_config_rwlock);
475 } else {
476 ds->ds_reserved = ds->ds_quota = 0;
477 }
478
479 if (err == 0) {
480 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
481 dsl_dataset_evict);
482 }
483 if (err || winner) {
484 bplist_destroy(&ds->ds_pending_deadlist);
485 dsl_deadlist_close(&ds->ds_deadlist);
486 if (ds->ds_prev)
487 dsl_dataset_drop_ref(ds->ds_prev, ds);
488 dsl_dir_close(ds->ds_dir, ds);
489 mutex_destroy(&ds->ds_lock);
490 mutex_destroy(&ds->ds_recvlock);
491 mutex_destroy(&ds->ds_opening_lock);
492 rw_destroy(&ds->ds_rwlock);
493 cv_destroy(&ds->ds_exclusive_cv);
494 kmem_free(ds, sizeof (dsl_dataset_t));
495 if (err) {
496 dmu_buf_rele(dbuf, tag);
497 return (err);
498 }
499 ds = winner;
500 } else {
501 ds->ds_fsid_guid =
502 unique_insert(ds->ds_phys->ds_fsid_guid);
503 }
504 }
505 ASSERT3P(ds->ds_dbuf, ==, dbuf);
506 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
507 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
508 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
509 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
510 mutex_enter(&ds->ds_lock);
511 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
512 mutex_exit(&ds->ds_lock);
513 dmu_buf_rele(ds->ds_dbuf, tag);
514 return (ENOENT);
515 }
516 mutex_exit(&ds->ds_lock);
517 *dsp = ds;
518 return (0);
519 }
520
521 static int
522 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
523 {
524 dsl_pool_t *dp = ds->ds_dir->dd_pool;
525
526 /*
527 * In syncing context we don't want to take the rwlock: there
528 * may be an existing writer waiting for sync phase to
529 * finish. We don't need to worry about such writers, since
530 * sync phase is single-threaded, so the writer can't be
531 * doing anything while we are active.
532 */
533 if (dsl_pool_sync_context(dp)) {
534 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
535 return (0);
536 }
537
538 /*
539 * Normal users will hold the ds_rwlock as a READER until they
540 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
541 * drop their READER lock after they set the ds_owner field.
542 *
543 * If the dataset is being destroyed, the destroy thread will
544 * obtain a WRITER lock for exclusive access after it's done its
545 * open-context work and then change the ds_owner to
546 * dsl_reaper once destruction is assured. So threads
547 * may block here temporarily, until the "destructability" of
548 * the dataset is determined.
549 */
550 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
551 mutex_enter(&ds->ds_lock);
552 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
553 rw_exit(&dp->dp_config_rwlock);
554 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
555 if (DSL_DATASET_IS_DESTROYED(ds)) {
556 mutex_exit(&ds->ds_lock);
557 dsl_dataset_drop_ref(ds, tag);
558 rw_enter(&dp->dp_config_rwlock, RW_READER);
559 return (ENOENT);
560 }
561 /*
562 * The dp_config_rwlock lives above the ds_lock. And
563 * we need to check DSL_DATASET_IS_DESTROYED() while
564 * holding the ds_lock, so we have to drop and reacquire
565 * the ds_lock here.
566 */
567 mutex_exit(&ds->ds_lock);
568 rw_enter(&dp->dp_config_rwlock, RW_READER);
569 mutex_enter(&ds->ds_lock);
570 }
571 mutex_exit(&ds->ds_lock);
572 return (0);
573 }
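/*
 * A minimal consumer sketch (error handling elided):
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_hold("pool/fs@snap", FTAG, &ds) == 0) {
 *		... read-only use; ds_rwlock is held as READER ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 * "Owners" (destroy, receive, etc.) go through dsl_dataset_own() /
 * dsl_dataset_disown() instead; tryown sets ds_owner and drops the
 * READER lock, allowing a later upgrade via
 * dsl_dataset_make_exclusive().
 */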
574
575 int
576 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
577 dsl_dataset_t **dsp)
578 {
579 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
580
581 if (err)
582 return (err);
583 return (dsl_dataset_hold_ref(*dsp, tag));
584 }
585
586 int
587 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
588 void *tag, dsl_dataset_t **dsp)
589 {
590 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
591 if (err)
592 return (err);
593 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
594 dsl_dataset_rele(*dsp, tag);
595 *dsp = NULL;
596 return (EBUSY);
597 }
598 return (0);
599 }
600
601 int
602 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
603 {
604 dsl_dir_t *dd;
605 dsl_pool_t *dp;
606 const char *snapname;
607 uint64_t obj;
608 int err = 0;
609
610 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
611 if (err)
612 return (err);
613
614 dp = dd->dd_pool;
615 obj = dd->dd_phys->dd_head_dataset_obj;
616 rw_enter(&dp->dp_config_rwlock, RW_READER);
617 if (obj)
618 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
619 else
620 err = ENOENT;
621 if (err)
622 goto out;
623
624 err = dsl_dataset_hold_ref(*dsp, tag);
625
626 /* we may be looking for a snapshot */
627 if (err == 0 && snapname != NULL) {
628 dsl_dataset_t *ds = NULL;
629
630 if (*snapname++ != '@') {
631 dsl_dataset_rele(*dsp, tag);
632 err = ENOENT;
633 goto out;
634 }
635
636 dprintf("looking for snapshot '%s'\n", snapname);
637 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
638 if (err == 0)
639 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
640 dsl_dataset_rele(*dsp, tag);
641
642 ASSERT3U((err == 0), ==, (ds != NULL));
643
644 if (ds) {
645 mutex_enter(&ds->ds_lock);
646 if (ds->ds_snapname[0] == 0)
647 (void) strlcpy(ds->ds_snapname, snapname,
648 sizeof (ds->ds_snapname));
649 mutex_exit(&ds->ds_lock);
650 err = dsl_dataset_hold_ref(ds, tag);
651 *dsp = err ? NULL : ds;
652 }
653 }
654 out:
655 rw_exit(&dp->dp_config_rwlock);
656 dsl_dir_close(dd, FTAG);
657 return (err);
658 }
659
660 int
661 dsl_dataset_own(const char *name, boolean_t inconsistentok,
662 void *tag, dsl_dataset_t **dsp)
663 {
664 int err = dsl_dataset_hold(name, tag, dsp);
665 if (err)
666 return (err);
667 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
668 dsl_dataset_rele(*dsp, tag);
669 return (EBUSY);
670 }
671 return (0);
672 }
673
674 void
675 dsl_dataset_name(dsl_dataset_t *ds, char *name)
676 {
677 if (ds == NULL) {
678 (void) strcpy(name, "mos");
679 } else {
680 dsl_dir_name(ds->ds_dir, name);
681 VERIFY(0 == dsl_dataset_get_snapname(ds));
682 if (ds->ds_snapname[0]) {
683 (void) strcat(name, "@");
684 /*
685 * We use a "recursive" mutex so that we
686 * can call dprintf_ds() with ds_lock held.
687 */
688 if (!MUTEX_HELD(&ds->ds_lock)) {
689 mutex_enter(&ds->ds_lock);
690 (void) strcat(name, ds->ds_snapname);
691 mutex_exit(&ds->ds_lock);
692 } else {
693 (void) strcat(name, ds->ds_snapname);
694 }
695 }
696 }
697 }
698
699 static int
700 dsl_dataset_namelen(dsl_dataset_t *ds)
701 {
702 int result;
703
704 if (ds == NULL) {
705 result = 3; /* "mos" */
706 } else {
707 result = dsl_dir_namelen(ds->ds_dir);
708 VERIFY(0 == dsl_dataset_get_snapname(ds));
709 if (ds->ds_snapname[0]) {
710 ++result; /* adding one for the @-sign */
711 if (!MUTEX_HELD(&ds->ds_lock)) {
712 mutex_enter(&ds->ds_lock);
713 result += strlen(ds->ds_snapname);
714 mutex_exit(&ds->ds_lock);
715 } else {
716 result += strlen(ds->ds_snapname);
717 }
718 }
719 }
720
721 return (result);
722 }
723
724 void
725 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
726 {
727 dmu_buf_rele(ds->ds_dbuf, tag);
728 }
729
730 void
731 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
732 {
733 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
734 rw_exit(&ds->ds_rwlock);
735 }
736 dsl_dataset_drop_ref(ds, tag);
737 }
738
739 void
740 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
741 {
742 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
743 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
744
745 mutex_enter(&ds->ds_lock);
746 ds->ds_owner = NULL;
747 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
748 rw_exit(&ds->ds_rwlock);
749 cv_broadcast(&ds->ds_exclusive_cv);
750 }
751 mutex_exit(&ds->ds_lock);
752 if (ds->ds_dbuf)
753 dsl_dataset_drop_ref(ds, tag);
754 else
755 dsl_dataset_evict(NULL, ds);
756 }
757
758 boolean_t
759 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
760 {
761 boolean_t gotit = FALSE;
762
763 mutex_enter(&ds->ds_lock);
764 if (ds->ds_owner == NULL &&
765 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
766 ds->ds_owner = tag;
767 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
768 rw_exit(&ds->ds_rwlock);
769 gotit = TRUE;
770 }
771 mutex_exit(&ds->ds_lock);
772 return (gotit);
773 }
774
775 void
776 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
777 {
778 ASSERT3P(owner, ==, ds->ds_owner);
779 if (!RW_WRITE_HELD(&ds->ds_rwlock))
780 rw_enter(&ds->ds_rwlock, RW_WRITER);
781 }
782
783 uint64_t
784 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
785 uint64_t flags, dmu_tx_t *tx)
786 {
787 dsl_pool_t *dp = dd->dd_pool;
788 dmu_buf_t *dbuf;
789 dsl_dataset_phys_t *dsphys;
790 uint64_t dsobj;
791 objset_t *mos = dp->dp_meta_objset;
792
793 if (origin == NULL)
794 origin = dp->dp_origin_snap;
795
796 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
797 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
798 ASSERT(dmu_tx_is_syncing(tx));
799 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
800
801 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
802 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
803 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
804 dmu_buf_will_dirty(dbuf, tx);
805 dsphys = dbuf->db_data;
806 bzero(dsphys, sizeof (dsl_dataset_phys_t));
807 dsphys->ds_dir_obj = dd->dd_object;
808 dsphys->ds_flags = flags;
809 dsphys->ds_fsid_guid = unique_create();
810 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
811 sizeof (dsphys->ds_guid));
812 dsphys->ds_snapnames_zapobj =
813 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
814 DMU_OT_NONE, 0, tx);
815 dsphys->ds_creation_time = gethrestime_sec();
816 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
817
818 if (origin == NULL) {
819 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
820 } else {
821 dsl_dataset_t *ohds;
822
823 dsphys->ds_prev_snap_obj = origin->ds_object;
824 dsphys->ds_prev_snap_txg =
825 origin->ds_phys->ds_creation_txg;
826 dsphys->ds_used_bytes =
827 origin->ds_phys->ds_used_bytes;
828 dsphys->ds_compressed_bytes =
829 origin->ds_phys->ds_compressed_bytes;
830 dsphys->ds_uncompressed_bytes =
831 origin->ds_phys->ds_uncompressed_bytes;
832 dsphys->ds_bp = origin->ds_phys->ds_bp;
833 dsphys->ds_flags |= origin->ds_phys->ds_flags;
834
835 dmu_buf_will_dirty(origin->ds_dbuf, tx);
836 origin->ds_phys->ds_num_children++;
837
838 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
839 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
840 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
841 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
842 dsl_dataset_rele(ohds, FTAG);
843
844 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
845 if (origin->ds_phys->ds_next_clones_obj == 0) {
846 origin->ds_phys->ds_next_clones_obj =
847 zap_create(mos,
848 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
849 }
850 VERIFY(0 == zap_add_int(mos,
851 origin->ds_phys->ds_next_clones_obj,
852 dsobj, tx));
853 }
854
855 dmu_buf_will_dirty(dd->dd_dbuf, tx);
856 dd->dd_phys->dd_origin_obj = origin->ds_object;
857 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
858 if (origin->ds_dir->dd_phys->dd_clones == 0) {
859 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
860 origin->ds_dir->dd_phys->dd_clones =
861 zap_create(mos,
862 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
863 }
864 VERIFY3U(0, ==, zap_add_int(mos,
865 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
866 }
867 }
868
869 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
870 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
871
872 dmu_buf_rele(dbuf, FTAG);
873
874 dmu_buf_will_dirty(dd->dd_dbuf, tx);
875 dd->dd_phys->dd_head_dataset_obj = dsobj;
876
877 return (dsobj);
878 }
879
880 uint64_t
881 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
882 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
883 {
884 dsl_pool_t *dp = pdd->dd_pool;
885 uint64_t dsobj, ddobj;
886 dsl_dir_t *dd;
887
888 ASSERT(lastname[0] != '@');
889
890 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
891 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
892
893 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
894
895 dsl_deleg_set_create_perms(dd, tx, cr);
896
897 dsl_dir_close(dd, FTAG);
898
899 /*
900 * If we are creating a clone, make sure we zero out any stale
901 * data from the origin snapshot's ZIL header.
902 */
903 if (origin != NULL) {
904 dsl_dataset_t *ds;
905 objset_t *os;
906
907 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
908 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
909 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
910 dsl_dataset_dirty(ds, tx);
911 dsl_dataset_rele(ds, FTAG);
912 }
913
914 return (dsobj);
915 }
916
917 /*
918 * The snapshots must all be in the same pool.
919 */
920 int
921 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
922 {
923 int err;
924 dsl_sync_task_t *dst;
925 spa_t *spa;
926 nvpair_t *pair;
927 dsl_sync_task_group_t *dstg;
928
929 pair = nvlist_next_nvpair(snaps, NULL);
930 if (pair == NULL)
931 return (0);
932
933 err = spa_open(nvpair_name(pair), &spa, FTAG);
934 if (err)
935 return (err);
936 dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
937
938 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
939 pair = nvlist_next_nvpair(snaps, pair)) {
940 dsl_dataset_t *ds;
941 int err;
942
943 err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
944 if (err == 0) {
945 struct dsl_ds_destroyarg *dsda;
946
947 dsl_dataset_make_exclusive(ds, dstg);
948 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
949 KM_SLEEP);
950 dsda->ds = ds;
951 dsda->defer = defer;
952 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
953 dsl_dataset_destroy_sync, dsda, dstg, 0);
954 } else if (err == ENOENT) {
955 err = 0;
956 } else {
957 (void) strcpy(failed, nvpair_name(pair));
958 break;
959 }
960 }
961
962 if (err == 0)
963 err = dsl_sync_task_group_wait(dstg);
964
965 for (dst = list_head(&dstg->dstg_tasks); dst;
966 dst = list_next(&dstg->dstg_tasks, dst)) {
967 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
968 dsl_dataset_t *ds = dsda->ds;
969
970 /*
971 * Return the file system name that triggered the error
972 */
973 if (dst->dst_err) {
974 dsl_dataset_name(ds, failed);
975 }
976 ASSERT3P(dsda->rm_origin, ==, NULL);
977 dsl_dataset_disown(ds, dstg);
978 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
979 }
980
981 dsl_sync_task_group_destroy(dstg);
982 spa_close(spa, FTAG);
983 return (err);
984
985 }
986
987 static boolean_t
988 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
989 {
990 boolean_t might_destroy = B_FALSE;
991
992 mutex_enter(&ds->ds_lock);
993 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
994 DS_IS_DEFER_DESTROY(ds))
995 might_destroy = B_TRUE;
996 mutex_exit(&ds->ds_lock);
997
998 return (might_destroy);
999 }
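/*
 * Note: a snapshot's ds_num_children starts at one when the snapshot
 * is taken and is incremented for each clone, so the "== 2" test above
 * means the clone being destroyed is the origin's only remaining
 * dependent.
 */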
1000
1001 /*
1002 * If we're removing a clone, and these three conditions are true:
1003 * 1) the clone's origin has no other children
1004 * 2) the clone's origin has no user references
1005 * 3) the clone's origin has been marked for deferred destruction
1006 * Then, prepare to remove the origin as part of this sync task group.
1007 */
1008 static int
1009 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1010 {
1011 dsl_dataset_t *ds = dsda->ds;
1012 dsl_dataset_t *origin = ds->ds_prev;
1013
1014 if (dsl_dataset_might_destroy_origin(origin)) {
1015 char *name;
1016 int namelen;
1017 int error;
1018
1019 namelen = dsl_dataset_namelen(origin) + 1;
1020 name = kmem_alloc(namelen, KM_SLEEP);
1021 dsl_dataset_name(origin, name);
1022 #ifdef _KERNEL
1023 error = zfs_unmount_snap(name, NULL);
1024 if (error) {
1025 kmem_free(name, namelen);
1026 return (error);
1027 }
1028 #endif
1029 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1030 kmem_free(name, namelen);
1031 if (error)
1032 return (error);
1033 dsda->rm_origin = origin;
1034 dsl_dataset_make_exclusive(origin, tag);
1035 }
1036
1037 return (0);
1038 }
1039
1040 /*
1041 * ds must be opened as OWNER. On return (whether successful or not),
1042 * ds will be closed and caller can no longer dereference it.
1043 */
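/*
 * Typical open-context call sequence (sketch; error handling elided --
 * see dmu_objset_destroy() for a real consumer):
 *
 *	dsl_dataset_t *ds;
 *	err = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG, defer);
 *	(ds is gone either way; do not touch it again)
 */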
1044 int
1045 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1046 {
1047 int err;
1048 dsl_sync_task_group_t *dstg;
1049 objset_t *os;
1050 dsl_dir_t *dd;
1051 uint64_t obj;
1052 struct dsl_ds_destroyarg dsda = { 0 };
1053 dsl_dataset_t *dummy_ds;
1054
1055 dsda.ds = ds;
1056
1057 if (dsl_dataset_is_snapshot(ds)) {
1058 /* Destroying a snapshot is simpler */
1059 dsl_dataset_make_exclusive(ds, tag);
1060
1061 dsda.defer = defer;
1062 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1063 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1064 &dsda, tag, 0);
1065 ASSERT3P(dsda.rm_origin, ==, NULL);
1066 goto out;
1067 } else if (defer) {
1068 err = EINVAL;
1069 goto out;
1070 }
1071
1072 dd = ds->ds_dir;
1073 dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
1074 dummy_ds->ds_dir = dd;
1075 dummy_ds->ds_object = ds->ds_object;
1076
1077 /*
1078 * Check for errors and mark this ds as inconsistent, in
1079 * case we crash while freeing the objects.
1080 */
1081 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1082 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1083 if (err)
1084 goto out_free;
1085
1086 err = dmu_objset_from_ds(ds, &os);
1087 if (err)
1088 goto out_free;
1089
1090 /*
1091 * remove the objects in open context, so that we won't
1092 * have too much to do in syncing context.
1093 */
1094 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1095 ds->ds_phys->ds_prev_snap_txg)) {
1096 /*
1097 * Ignore errors, if there is not enough disk space
1098 * we will deal with it in dsl_dataset_destroy_sync().
1099 */
1100 (void) dmu_free_object(os, obj);
1101 }
1102 if (err != ESRCH)
1103 goto out_free;
1104
1105 /*
1106 * Only the ZIL knows how to free log blocks.
1107 */
1108 zil_destroy(dmu_objset_zil(os), B_FALSE);
1109
1110 /*
1111 * Sync out all in-flight IO.
1112 */
1113 txg_wait_synced(dd->dd_pool, 0);
1114
1115 /*
1116 * If we managed to free all the objects in open
1117 * context, the user space accounting should be zero.
1118 */
1119 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1120 dmu_objset_userused_enabled(os)) {
1121 ASSERTV(uint64_t count);
1122 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1123 count == 0);
1124 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1125 count == 0);
1126 }
1127
1128 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1129 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1130 rw_exit(&dd->dd_pool->dp_config_rwlock);
1131
1132 if (err)
1133 goto out_free;
1134
1135 /*
1136 * Blow away the dsl_dir + head dataset.
1137 */
1138 dsl_dataset_make_exclusive(ds, tag);
1139 /*
1140 * If we're removing a clone, we might also need to remove its
1141 * origin.
1142 */
1143 do {
1144 dsda.need_prep = B_FALSE;
1145 if (dsl_dir_is_clone(dd)) {
1146 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1147 if (err) {
1148 dsl_dir_close(dd, FTAG);
1149 goto out_free;
1150 }
1151 }
1152
1153 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1154 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1155 dsl_dataset_destroy_sync, &dsda, tag, 0);
1156 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1157 dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
1158 err = dsl_sync_task_group_wait(dstg);
1159 dsl_sync_task_group_destroy(dstg);
1160
1161 /*
1162 * We could be racing against 'zfs release' or 'zfs destroy -d'
1163 * on the origin snap, in which case we can get EBUSY if we
1164 * needed to destroy the origin snap but were not ready to
1165 * do so.
1166 */
1167 if (dsda.need_prep) {
1168 ASSERT(err == EBUSY);
1169 ASSERT(dsl_dir_is_clone(dd));
1170 ASSERT(dsda.rm_origin == NULL);
1171 }
1172 } while (dsda.need_prep);
1173
1174 if (dsda.rm_origin != NULL)
1175 dsl_dataset_disown(dsda.rm_origin, tag);
1176
1177 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1178 if (err)
1179 dsl_dir_close(dd, FTAG);
1180
1181 out_free:
1182 kmem_free(dummy_ds, sizeof (dsl_dataset_t));
1183 out:
1184 dsl_dataset_disown(ds, tag);
1185 return (err);
1186 }
1187
1188 blkptr_t *
1189 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1190 {
1191 return (&ds->ds_phys->ds_bp);
1192 }
1193
1194 void
1195 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1196 {
1197 ASSERT(dmu_tx_is_syncing(tx));
1198 /* If it's the meta-objset, set dp_meta_rootbp */
1199 if (ds == NULL) {
1200 tx->tx_pool->dp_meta_rootbp = *bp;
1201 } else {
1202 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1203 ds->ds_phys->ds_bp = *bp;
1204 }
1205 }
1206
1207 spa_t *
1208 dsl_dataset_get_spa(dsl_dataset_t *ds)
1209 {
1210 return (ds->ds_dir->dd_pool->dp_spa);
1211 }
1212
1213 void
1214 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1215 {
1216 dsl_pool_t *dp;
1217
1218 if (ds == NULL) /* this is the meta-objset */
1219 return;
1220
1221 ASSERT(ds->ds_objset != NULL);
1222
1223 if (ds->ds_phys->ds_next_snap_obj != 0)
1224 panic("dirtying snapshot!");
1225
1226 dp = ds->ds_dir->dd_pool;
1227
1228 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1229 /* up the hold count until we can be written out */
1230 dmu_buf_add_ref(ds->ds_dbuf, ds);
1231 }
1232 }
1233
1234 /*
1235 * The unique space in the head dataset can be calculated by subtracting
1236 * the space used in the most recent snapshot, that is still being used
1237 * in this file system, from the space currently in use. To figure out
1238 * the space in the most recent snapshot still in use, we need to take
1239 * the total space used in the snapshot and subtract out the space that
1240 * has been freed up since the snapshot was taken.
1241 */
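/*
 * For example (hypothetical numbers): if the head currently uses 10G,
 * the most recent snapshot used 8G when it was taken, and 3G of its
 * blocks have since been freed (i.e. landed on the head's deadlist),
 * then unique = 10G - (8G - 3G) = 5G.
 */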
1242 static void
1243 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1244 {
1245 uint64_t mrs_used;
1246 uint64_t dlused, dlcomp, dluncomp;
1247
1248 ASSERT(!dsl_dataset_is_snapshot(ds));
1249
1250 if (ds->ds_phys->ds_prev_snap_obj != 0)
1251 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1252 else
1253 mrs_used = 0;
1254
1255 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1256
1257 ASSERT3U(dlused, <=, mrs_used);
1258 ds->ds_phys->ds_unique_bytes =
1259 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1260
1261 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1262 SPA_VERSION_UNIQUE_ACCURATE)
1263 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1264 }
1265
1266 struct killarg {
1267 dsl_dataset_t *ds;
1268 dmu_tx_t *tx;
1269 };
1270
1271 /* ARGSUSED */
1272 static int
1273 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1274 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1275 {
1276 struct killarg *ka = arg;
1277 dmu_tx_t *tx = ka->tx;
1278
1279 if (bp == NULL)
1280 return (0);
1281
1282 if (zb->zb_level == ZB_ZIL_LEVEL) {
1283 ASSERT(zilog != NULL);
1284 /*
1285 * It's a block in the intent log. It has no
1286 * accounting, so just free it.
1287 */
1288 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1289 } else {
1290 ASSERT(zilog == NULL);
1291 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1292 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1293 }
1294
1295 return (0);
1296 }
1297
1298 /* ARGSUSED */
1299 static int
1300 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1301 {
1302 dsl_dataset_t *ds = arg1;
1303 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1304 uint64_t count;
1305 int err;
1306
1307 /*
1308 * Can't delete a head dataset if there are snapshots of it.
1309 * (Except if the only snapshots are from the branch we cloned
1310 * from.)
1311 */
1312 if (ds->ds_prev != NULL &&
1313 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1314 return (EBUSY);
1315
1316 /*
1317 * This is really a dsl_dir thing, but check it here so that
1318 * we'll be less likely to leave this dataset inconsistent &
1319 * nearly destroyed.
1320 */
1321 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1322 if (err)
1323 return (err);
1324 if (count != 0)
1325 return (EEXIST);
1326
1327 return (0);
1328 }
1329
1330 /* ARGSUSED */
1331 static void
1332 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1333 {
1334 dsl_dataset_t *ds = arg1;
1335 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1336
1337 /* Mark it as inconsistent on-disk, in case we crash */
1338 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1339 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1340
1341 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1342 "dataset = %llu", ds->ds_object);
1343 }
1344
1345 static int
1346 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1347 dmu_tx_t *tx)
1348 {
1349 dsl_dataset_t *ds = dsda->ds;
1350 dsl_dataset_t *ds_prev = ds->ds_prev;
1351
1352 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1353 struct dsl_ds_destroyarg ndsda = {0};
1354
1355 /*
1356 * If we're not prepared to remove the origin, don't remove
1357 * the clone either.
1358 */
1359 if (dsda->rm_origin == NULL) {
1360 dsda->need_prep = B_TRUE;
1361 return (EBUSY);
1362 }
1363
1364 ndsda.ds = ds_prev;
1365 ndsda.is_origin_rm = B_TRUE;
1366 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1367 }
1368
1369 /*
1370 * If we're not going to remove the origin after all,
1371 * undo the open context setup.
1372 */
1373 if (dsda->rm_origin != NULL) {
1374 dsl_dataset_disown(dsda->rm_origin, tag);
1375 dsda->rm_origin = NULL;
1376 }
1377
1378 return (0);
1379 }
1380
1381 /*
1382 * If you add new checks here, you may need to add
1383 * additional checks to the "temporary" case in
1384 * snapshot_check() in dmu_objset.c.
1385 */
1386 /* ARGSUSED */
1387 int
1388 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1389 {
1390 struct dsl_ds_destroyarg *dsda = arg1;
1391 dsl_dataset_t *ds = dsda->ds;
1392
1393 /* we have an owner hold, so no one else can destroy us */
1394 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1395
1396 /*
1397 * Only allow deferred destroy on pools that support it.
1398 * NOTE: deferred destroy is only supported on snapshots.
1399 */
1400 if (dsda->defer) {
1401 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1402 SPA_VERSION_USERREFS)
1403 return (ENOTSUP);
1404 ASSERT(dsl_dataset_is_snapshot(ds));
1405 return (0);
1406 }
1407
1408 /*
1409 * Can't delete a head dataset if there are snapshots of it.
1410 * (Except if the only snapshots are from the branch we cloned
1411 * from.)
1412 */
1413 if (ds->ds_prev != NULL &&
1414 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1415 return (EBUSY);
1416
1417 /*
1418 * If we made changes this txg, traverse_dsl_dataset won't find
1419 * them. Try again.
1420 */
1421 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1422 return (EAGAIN);
1423
1424 if (dsl_dataset_is_snapshot(ds)) {
1425 /*
1426 * If this snapshot has an elevated user reference count,
1427 * we can't destroy it yet.
1428 */
1429 if (ds->ds_userrefs > 0 && !dsda->releasing)
1430 return (EBUSY);
1431
1432 mutex_enter(&ds->ds_lock);
1433 /*
1434 * Can't delete a branch point. However, if we're destroying
1435 * a clone and removing its origin due to it having a user
1436 * hold count of 0 and having been marked for deferred destroy,
1437 * it's OK for the origin to have a single clone.
1438 */
1439 if (ds->ds_phys->ds_num_children >
1440 (dsda->is_origin_rm ? 2 : 1)) {
1441 mutex_exit(&ds->ds_lock);
1442 return (EEXIST);
1443 }
1444 mutex_exit(&ds->ds_lock);
1445 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1446 return (dsl_dataset_origin_check(dsda, arg2, tx));
1447 }
1448
1449 /* XXX we should do some i/o error checking... */
1450 return (0);
1451 }
1452
1453 struct refsarg {
1454 kmutex_t lock;
1455 boolean_t gone;
1456 kcondvar_t cv;
1457 };
1458
1459 /* ARGSUSED */
1460 static void
1461 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1462 {
1463 struct refsarg *arg = argv;
1464
1465 mutex_enter(&arg->lock);
1466 arg->gone = TRUE;
1467 cv_signal(&arg->cv);
1468 mutex_exit(&arg->lock);
1469 }
1470
1471 static void
1472 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1473 {
1474 struct refsarg arg;
1475
1476 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1477 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1478 arg.gone = FALSE;
1479 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1480 dsl_dataset_refs_gone);
1481 dmu_buf_rele(ds->ds_dbuf, tag);
1482 mutex_enter(&arg.lock);
1483 while (!arg.gone)
1484 cv_wait(&arg.cv, &arg.lock);
1485 ASSERT(arg.gone);
1486 mutex_exit(&arg.lock);
1487 ds->ds_dbuf = NULL;
1488 ds->ds_phys = NULL;
1489 mutex_destroy(&arg.lock);
1490 cv_destroy(&arg.cv);
1491 }
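/*
 * The sequence above, spelled out: swap in dsl_dataset_refs_gone() as
 * the dbuf's eviction callback, drop our own hold, then sleep on
 * arg.cv until the last holder releases the buffer and the callback
 * fires.  Only then is it safe to zero ds_dbuf/ds_phys.
 */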
1492
1493 static void
1494 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1495 {
1496 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1497 int err;
1498 ASSERTV(uint64_t count);
1499
1500 ASSERT(ds->ds_phys->ds_num_children >= 2);
1501 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1502 /*
1503 * The err should not be ENOENT, but a bug in a previous version
1504 * of the code could cause upgrade_clones_cb() to not set
1505 * ds_next_snap_obj when it should, leading to a missing entry.
1506 * If we knew that the pool was created after
1507 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1508 * ENOENT. However, at least we can check that we don't have
1509 * too many entries in the next_clones_obj even after failing to
1510 * remove this one.
1511 */
1512 if (err != ENOENT) {
1513 VERIFY3U(err, ==, 0);
1514 }
1515 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1516 &count));
1517 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1518 }
1519
1520 static void
1521 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1522 {
1523 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1524 zap_cursor_t zc;
1525 zap_attribute_t za;
1526
1527 /*
1528 * If it is the old version, dd_clones doesn't exist so we can't
1529 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
1530 * doesn't matter.
1531 */
1532 if (ds->ds_dir->dd_phys->dd_clones == 0)
1533 return;
1534
1535 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1536 zap_cursor_retrieve(&zc, &za) == 0;
1537 zap_cursor_advance(&zc)) {
1538 dsl_dataset_t *clone;
1539
1540 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1541 za.za_first_integer, FTAG, &clone));
1542 if (clone->ds_dir->dd_origin_txg > mintxg) {
1543 dsl_deadlist_remove_key(&clone->ds_deadlist,
1544 mintxg, tx);
1545 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1546 }
1547 dsl_dataset_rele(clone, FTAG);
1548 }
1549 zap_cursor_fini(&zc);
1550 }
1551
1552 struct process_old_arg {
1553 dsl_dataset_t *ds;
1554 dsl_dataset_t *ds_prev;
1555 boolean_t after_branch_point;
1556 zio_t *pio;
1557 uint64_t used, comp, uncomp;
1558 };
1559
1560 static int
1561 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1562 {
1563 struct process_old_arg *poa = arg;
1564 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1565
1566 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1567 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1568 if (poa->ds_prev && !poa->after_branch_point &&
1569 bp->blk_birth >
1570 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1571 poa->ds_prev->ds_phys->ds_unique_bytes +=
1572 bp_get_dsize_sync(dp->dp_spa, bp);
1573 }
1574 } else {
1575 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1576 poa->comp += BP_GET_PSIZE(bp);
1577 poa->uncomp += BP_GET_UCSIZE(bp);
1578 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1579 }
1580 return (0);
1581 }
1582
1583 static void
1584 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1585 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1586 {
1587 struct process_old_arg poa = { 0 };
1588 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1589 objset_t *mos = dp->dp_meta_objset;
1590
1591 ASSERT(ds->ds_deadlist.dl_oldfmt);
1592 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1593
1594 poa.ds = ds;
1595 poa.ds_prev = ds_prev;
1596 poa.after_branch_point = after_branch_point;
1597 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1598 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1599 process_old_cb, &poa, tx));
1600 VERIFY3U(zio_wait(poa.pio), ==, 0);
1601 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1602
1603 /* change snapused */
1604 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1605 -poa.used, -poa.comp, -poa.uncomp, tx);
1606
1607 /* swap next's deadlist to our deadlist */
1608 dsl_deadlist_close(&ds->ds_deadlist);
1609 dsl_deadlist_close(&ds_next->ds_deadlist);
1610 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1611 ds->ds_phys->ds_deadlist_obj);
1612 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1613 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1614 ds_next->ds_phys->ds_deadlist_obj);
1615 }
1616
1617 void
1618 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1619 {
1620 struct dsl_ds_destroyarg *dsda = arg1;
1621 dsl_dataset_t *ds = dsda->ds;
1622 int err;
1623 int after_branch_point = FALSE;
1624 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1625 objset_t *mos = dp->dp_meta_objset;
1626 dsl_dataset_t *ds_prev = NULL;
1627 boolean_t wont_destroy;
1628 uint64_t obj;
1629
1630 wont_destroy = (dsda->defer &&
1631 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1632
1633 ASSERT(ds->ds_owner || wont_destroy);
1634 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1635 ASSERT(ds->ds_prev == NULL ||
1636 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1637 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1638
1639 if (wont_destroy) {
1640 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1641 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1642 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1643 return;
1644 }
1645
1646 /* signal any waiters that this dataset is going away */
1647 mutex_enter(&ds->ds_lock);
1648 ds->ds_owner = dsl_reaper;
1649 cv_broadcast(&ds->ds_exclusive_cv);
1650 mutex_exit(&ds->ds_lock);
1651
1652 /* Remove our reservation */
1653 if (ds->ds_reserved != 0) {
1654 dsl_prop_setarg_t psa;
1655 uint64_t value = 0;
1656
1657 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1658 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1659 &value);
1660 psa.psa_effective_value = 0; /* predict default value */
1661
1662 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1663 ASSERT3U(ds->ds_reserved, ==, 0);
1664 }
1665
1666 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1667
1668 dsl_scan_ds_destroyed(ds, tx);
1669
1670 obj = ds->ds_object;
1671
1672 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1673 if (ds->ds_prev) {
1674 ds_prev = ds->ds_prev;
1675 } else {
1676 VERIFY(0 == dsl_dataset_hold_obj(dp,
1677 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1678 }
1679 after_branch_point =
1680 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1681
1682 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1683 if (after_branch_point &&
1684 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1685 remove_from_next_clones(ds_prev, obj, tx);
1686 if (ds->ds_phys->ds_next_snap_obj != 0) {
1687 VERIFY(0 == zap_add_int(mos,
1688 ds_prev->ds_phys->ds_next_clones_obj,
1689 ds->ds_phys->ds_next_snap_obj, tx));
1690 }
1691 }
1692 if (after_branch_point &&
1693 ds->ds_phys->ds_next_snap_obj == 0) {
1694 /* This clone is toast. */
1695 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1696 ds_prev->ds_phys->ds_num_children--;
1697
1698 /*
1699 * If the clone's origin has no other clones, no
1700 * user holds, and has been marked for deferred
1701 * deletion, then we should have done the necessary
1702 * destroy setup for it.
1703 */
1704 if (ds_prev->ds_phys->ds_num_children == 1 &&
1705 ds_prev->ds_userrefs == 0 &&
1706 DS_IS_DEFER_DESTROY(ds_prev)) {
1707 ASSERT3P(dsda->rm_origin, !=, NULL);
1708 } else {
1709 ASSERT3P(dsda->rm_origin, ==, NULL);
1710 }
1711 } else if (!after_branch_point) {
1712 ds_prev->ds_phys->ds_next_snap_obj =
1713 ds->ds_phys->ds_next_snap_obj;
1714 }
1715 }
1716
1717 if (dsl_dataset_is_snapshot(ds)) {
1718 dsl_dataset_t *ds_next;
1719 uint64_t old_unique;
1720 uint64_t used = 0, comp = 0, uncomp = 0;
1721
1722 VERIFY(0 == dsl_dataset_hold_obj(dp,
1723 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1724 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1725
1726 old_unique = ds_next->ds_phys->ds_unique_bytes;
1727
1728 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1729 ds_next->ds_phys->ds_prev_snap_obj =
1730 ds->ds_phys->ds_prev_snap_obj;
1731 ds_next->ds_phys->ds_prev_snap_txg =
1732 ds->ds_phys->ds_prev_snap_txg;
1733 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1734 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1735
1736
1737 if (ds_next->ds_deadlist.dl_oldfmt) {
1738 process_old_deadlist(ds, ds_prev, ds_next,
1739 after_branch_point, tx);
1740 } else {
1741 /* Adjust prev's unique space. */
1742 if (ds_prev && !after_branch_point) {
1743 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1744 ds_prev->ds_phys->ds_prev_snap_txg,
1745 ds->ds_phys->ds_prev_snap_txg,
1746 &used, &comp, &uncomp);
1747 ds_prev->ds_phys->ds_unique_bytes += used;
1748 }
1749
1750 /* Adjust snapused. */
1751 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1752 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1753 &used, &comp, &uncomp);
1754 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1755 -used, -comp, -uncomp, tx);
1756
1757 /* Move blocks to be freed to pool's free list. */
1758 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1759 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1760 tx);
1761 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1762 DD_USED_HEAD, used, comp, uncomp, tx);
1763 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1764
1765 /* Merge our deadlist into next's and free it. */
1766 dsl_deadlist_merge(&ds_next->ds_deadlist,
1767 ds->ds_phys->ds_deadlist_obj, tx);
1768 }
1769 dsl_deadlist_close(&ds->ds_deadlist);
1770 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1771
1772 /* Collapse range in clone heads */
1773 dsl_dataset_remove_clones_key(ds,
1774 ds->ds_phys->ds_creation_txg, tx);
1775
1776 if (dsl_dataset_is_snapshot(ds_next)) {
1777 dsl_dataset_t *ds_nextnext;
1778 dsl_dataset_t *hds;
1779
1780 /*
1781 * Update next's unique to include blocks which
1782 * were previously shared by only this snapshot
1783 * and it. Those blocks will be born after the
1784 * prev snap and before this snap, and will have
1785 * died after the next snap and before the one
1786 * after that (i.e. be on the snap-after-next's
1787 * deadlist).
1788 */
1789 VERIFY(0 == dsl_dataset_hold_obj(dp,
1790 ds_next->ds_phys->ds_next_snap_obj,
1791 FTAG, &ds_nextnext));
1792 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1793 ds->ds_phys->ds_prev_snap_txg,
1794 ds->ds_phys->ds_creation_txg,
1795 &used, &comp, &uncomp);
1796 ds_next->ds_phys->ds_unique_bytes += used;
1797 dsl_dataset_rele(ds_nextnext, FTAG);
1798 ASSERT3P(ds_next->ds_prev, ==, NULL);
1799
1800 /* Collapse range in this head. */
1801 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1802 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1803 FTAG, &hds));
1804 dsl_deadlist_remove_key(&hds->ds_deadlist,
1805 ds->ds_phys->ds_creation_txg, tx);
1806 dsl_dataset_rele(hds, FTAG);
1807
1808 } else {
1809 ASSERT3P(ds_next->ds_prev, ==, ds);
1810 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1811 ds_next->ds_prev = NULL;
1812 if (ds_prev) {
1813 VERIFY(0 == dsl_dataset_get_ref(dp,
1814 ds->ds_phys->ds_prev_snap_obj,
1815 ds_next, &ds_next->ds_prev));
1816 }
1817
1818 dsl_dataset_recalc_head_uniq(ds_next);
1819
1820 /*
1821 * Reduce the amount of our unconsumed refreservation
1822 * being charged to our parent by the amount of
1823 * new unique data we have gained.
1824 */
1825 if (old_unique < ds_next->ds_reserved) {
1826 int64_t mrsdelta;
1827 uint64_t new_unique =
1828 ds_next->ds_phys->ds_unique_bytes;
1829
1830 ASSERT(old_unique <= new_unique);
1831 mrsdelta = MIN(new_unique - old_unique,
1832 ds_next->ds_reserved - old_unique);
1833 dsl_dir_diduse_space(ds->ds_dir,
1834 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1835 }
1836 }
1837 dsl_dataset_rele(ds_next, FTAG);
1838 } else {
1839 /*
1840 * There's no next snapshot, so this is a head dataset.
1841 * Destroy the deadlist. Unless it's a clone, the
1842 * deadlist should be empty. (If it's a clone, it's
1843 * safe to ignore the deadlist contents.)
1844 */
1845 struct killarg ka;
1846
1847 dsl_deadlist_close(&ds->ds_deadlist);
1848 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1849 ds->ds_phys->ds_deadlist_obj = 0;
1850
1851 /*
1852 * Free everything that we point to (that's born after
1853 * the previous snapshot, if we are a clone)
1854 *
1855 * NB: this should be very quick, because we already
1856 * freed all the objects in open context.
1857 */
1858 ka.ds = ds;
1859 ka.tx = tx;
1860 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1861 TRAVERSE_POST, kill_blkptr, &ka);
1862 ASSERT3U(err, ==, 0);
1863 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1864 ds->ds_phys->ds_unique_bytes == 0);
1865
1866 if (ds->ds_prev != NULL) {
1867 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1868 VERIFY3U(0, ==, zap_remove_int(mos,
1869 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1870 ds->ds_object, tx));
1871 }
1872 dsl_dataset_rele(ds->ds_prev, ds);
1873 ds->ds_prev = ds_prev = NULL;
1874 }
1875 }
1876
1877 /*
1878 * This must be done after the dsl_traverse(), because it will
1879 * re-open the objset.
1880 */
1881 if (ds->ds_objset) {
1882 dmu_objset_evict(ds->ds_objset);
1883 ds->ds_objset = NULL;
1884 }
1885
1886 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1887 /* Erase the link in the dir */
1888 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1889 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1890 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1891 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1892 ASSERT(err == 0);
1893 } else {
1894 /* remove from snapshot namespace */
1895 dsl_dataset_t *ds_head;
1896 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1897 VERIFY(0 == dsl_dataset_hold_obj(dp,
1898 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1899 VERIFY(0 == dsl_dataset_get_snapname(ds));
1900 #ifdef ZFS_DEBUG
1901 {
1902 uint64_t val;
1903
1904 err = dsl_dataset_snap_lookup(ds_head,
1905 ds->ds_snapname, &val);
1906 ASSERT3U(err, ==, 0);
1907 ASSERT3U(val, ==, obj);
1908 }
1909 #endif
1910 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1911 ASSERT(err == 0);
1912 dsl_dataset_rele(ds_head, FTAG);
1913 }
1914
1915 if (ds_prev && ds->ds_prev != ds_prev)
1916 dsl_dataset_rele(ds_prev, FTAG);
1917
1918 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1919 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1920 "dataset = %llu", ds->ds_object);
1921
1922 if (ds->ds_phys->ds_next_clones_obj != 0) {
1923 ASSERTV(uint64_t count);
1924 ASSERT(0 == zap_count(mos,
1925 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1926 VERIFY(0 == dmu_object_free(mos,
1927 ds->ds_phys->ds_next_clones_obj, tx));
1928 }
1929 if (ds->ds_phys->ds_props_obj != 0)
1930 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1931 if (ds->ds_phys->ds_userrefs_obj != 0)
1932 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1933 dsl_dir_close(ds->ds_dir, ds);
1934 ds->ds_dir = NULL;
1935 dsl_dataset_drain_refs(ds, tag);
1936 VERIFY(0 == dmu_object_free(mos, obj, tx));
1937
1938 if (dsda->rm_origin) {
1939 /*
1940 * Remove the origin of the clone we just destroyed.
1941 */
1942 struct dsl_ds_destroyarg ndsda = {0};
1943
1944 ndsda.ds = dsda->rm_origin;
1945 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1946 }
1947 }
1948
1949 static int
1950 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1951 {
1952 uint64_t asize;
1953
1954 if (!dmu_tx_is_syncing(tx))
1955 return (0);
1956
1957 /*
1958 * If there's an fs-only reservation, any blocks that might become
1959 * owned by the snapshot dataset must be accommodated by space
1960 * outside of the reservation.
1961 */
1962 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1963 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1964 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1965 return (ENOSPC);
1966
1967 /*
1968 * Propagate any reserved space for this snapshot to other
1969 * snapshot checks in this sync group.
1970 */
1971 if (asize > 0)
1972 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1973
1974 return (0);
1975 }
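/*
 * Worked example (hypothetical numbers): with refreservation = 10G
 * and ds_unique_bytes = 3G, asize = MIN(3G, 10G) = 3G.  Those 3G of
 * unique blocks become owned by the snapshot, yet the refreservation
 * still guarantees 10G of writable space, so 3G of additional space
 * outside the reservation must be available or the check above
 * returns ENOSPC.
 */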
1976
1977 int
1978 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1979 {
1980 dsl_dataset_t *ds = arg1;
1981 const char *snapname = arg2;
1982 int err;
1983 uint64_t value;
1984
1985 /*
1986 * We don't allow multiple snapshots of the same txg. If there
1987 * is already one, try again.
1988 */
1989 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1990 return (EAGAIN);
1991
1992 /*
1993 * Check for a conflicting snapshot name.
1994 */
1995 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1996 if (err == 0)
1997 return (EEXIST);
1998 if (err != ENOENT)
1999 return (err);
2000
2001 /*
2002 * Check that the snapshot's full name is not too long: the dataset
2003 * name's length + 1 for the @-sign + the snapshot name's length.
2004 */
2005 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2006 return (ENAMETOOLONG);
2007
2008 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2009 if (err)
2010 return (err);
2011
2012 ds->ds_trysnap_txg = tx->tx_txg;
2013 return (0);
2014 }
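/*
 * Worked example of the length check (assuming MAXNAMELEN is 256):
 * for a dataset named "tank/data" (9 characters), the longest
 * acceptable snapshot name is 256 - 9 - 1 (for the @-sign) - 1 (for
 * the terminating NUL) = 245 characters; anything longer fails with
 * ENAMETOOLONG.
 */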
2015
2016 void
2017 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2018 {
2019 dsl_dataset_t *ds = arg1;
2020 const char *snapname = arg2;
2021 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2022 dmu_buf_t *dbuf;
2023 dsl_dataset_phys_t *dsphys;
2024 uint64_t dsobj, crtxg;
2025 objset_t *mos = dp->dp_meta_objset;
2026 int err;
2027
2028 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2029
2030 /*
2031 * The origin's ds_creation_txg has to be < TXG_INITIAL
2032 */
2033 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2034 crtxg = 1;
2035 else
2036 crtxg = tx->tx_txg;
2037
2038 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2039 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2040 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2041 dmu_buf_will_dirty(dbuf, tx);
2042 dsphys = dbuf->db_data;
2043 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2044 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2045 dsphys->ds_fsid_guid = unique_create();
2046 (void) random_get_pseudo_bytes((void *)&dsphys->ds_guid,
2047 sizeof (dsphys->ds_guid));
2048 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2049 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2050 dsphys->ds_next_snap_obj = ds->ds_object;
2051 dsphys->ds_num_children = 1;
2052 dsphys->ds_creation_time = gethrestime_sec();
2053 dsphys->ds_creation_txg = crtxg;
2054 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2055 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2056 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2057 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2058 dsphys->ds_flags = ds->ds_phys->ds_flags;
2059 dsphys->ds_bp = ds->ds_phys->ds_bp;
2060 dmu_buf_rele(dbuf, FTAG);
2061
2062 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2063 if (ds->ds_prev) {
2064 uint64_t next_clones_obj =
2065 ds->ds_prev->ds_phys->ds_next_clones_obj;
2066 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2067 ds->ds_object ||
2068 ds->ds_prev->ds_phys->ds_num_children > 1);
2069 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2070 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2071 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2072 ds->ds_prev->ds_phys->ds_creation_txg);
2073 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2074 } else if (next_clones_obj != 0) {
2075 remove_from_next_clones(ds->ds_prev,
2076 dsphys->ds_next_snap_obj, tx);
2077 VERIFY3U(0, ==, zap_add_int(mos,
2078 next_clones_obj, dsobj, tx));
2079 }
2080 }
2081
2082 /*
2083 * If we have a reference-reservation on this dataset, we will
2084 * need to increase the amount of refreservation being charged
2085 * since our unique space is going to zero.
2086 */
2087 if (ds->ds_reserved) {
2088 int64_t delta;
2089 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2090 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2091 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2092 delta, 0, 0, tx);
2093 }
2094
2095 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2096 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2097 ds->ds_dir->dd_myname, snapname, dsobj,
2098 ds->ds_phys->ds_prev_snap_txg);
2099 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2100 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2101 dsl_deadlist_close(&ds->ds_deadlist);
2102 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2103 dsl_deadlist_add_key(&ds->ds_deadlist,
2104 ds->ds_phys->ds_prev_snap_txg, tx);
2105
2106 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2107 ds->ds_phys->ds_prev_snap_obj = dsobj;
2108 ds->ds_phys->ds_prev_snap_txg = crtxg;
2109 ds->ds_phys->ds_unique_bytes = 0;
2110 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2111 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2112
2113 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2114 snapname, 8, 1, &dsobj, tx);
2115 ASSERT(err == 0);
2116
2117 if (ds->ds_prev)
2118 dsl_dataset_drop_ref(ds->ds_prev, ds);
2119 VERIFY(0 == dsl_dataset_get_ref(dp,
2120 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2121
2122 dsl_scan_ds_snapshotted(ds, tx);
2123
2124 dsl_dir_snap_cmtime_update(ds->ds_dir);
2125
2126 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2127 "dataset = %llu", dsobj);
2128 }
2129
2130 void
2131 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2132 {
2133 ASSERT(dmu_tx_is_syncing(tx));
2134 ASSERT(ds->ds_objset != NULL);
2135 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2136
2137 /*
2138 * in case we had to change ds_fsid_guid when we opened it,
2139 * sync it out now.
2140 */
2141 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2142 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2143
2144 dsl_dir_dirty(ds->ds_dir, tx);
2145 dmu_objset_sync(ds->ds_objset, zio, tx);
2146 }
2147
2148 static void
2149 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2150 {
2151 uint64_t count = 0;
2152 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2153 zap_cursor_t zc;
2154 zap_attribute_t za;
2155 nvlist_t *propval;
2156 nvlist_t *val;
2157
2158 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2159 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2160 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2161
2162 /*
2163 * There may be missing entries in ds_next_clones_obj
2164 * due to a bug in a previous version of the code.
2165 * Only trust it if it has the right number of entries.
2166 */
2167 if (ds->ds_phys->ds_next_clones_obj != 0) {
2168 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2169 &count));
2170 }
2171 if (count != ds->ds_phys->ds_num_children - 1) {
2172 goto fail;
2173 }
2174 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2175 zap_cursor_retrieve(&zc, &za) == 0;
2176 zap_cursor_advance(&zc)) {
2177 dsl_dataset_t *clone;
2178 char buf[ZFS_MAXNAMELEN];
2179 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2180 za.za_first_integer, FTAG, &clone) != 0) {
2181 goto fail;
2182 }
2183 dsl_dir_name(clone->ds_dir, buf);
2184 VERIFY(nvlist_add_boolean(val, buf) == 0);
2185 dsl_dataset_rele(clone, FTAG);
2186 }
2187 zap_cursor_fini(&zc);
2188 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2189 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2190 propval) == 0);
2191 fail:
2192 nvlist_free(val);
2193 nvlist_free(propval);
2194 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2195 }
2196
2197 void
2198 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2199 {
2200 uint64_t refd, avail, uobjs, aobjs, ratio;
2201
2202 dsl_dir_stats(ds->ds_dir, nv);
2203
2204 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2205 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2206 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2207
2208 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2209 ds->ds_phys->ds_creation_time);
2210 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2211 ds->ds_phys->ds_creation_txg);
2212 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2213 ds->ds_quota);
2214 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2215 ds->ds_reserved);
2216 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2217 ds->ds_phys->ds_guid);
2218 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2219 ds->ds_phys->ds_unique_bytes);
2220 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2221 ds->ds_object);
2222 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2223 ds->ds_userrefs);
2224 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2225 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2226
2227 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2228 uint64_t written, comp, uncomp;
2229 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2230 dsl_dataset_t *prev;
2231 int err;
2232
2233 rw_enter(&dp->dp_config_rwlock, RW_READER);
2234 err = dsl_dataset_hold_obj(dp,
2235 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2236 rw_exit(&dp->dp_config_rwlock);
2237 if (err == 0) {
2238 err = dsl_dataset_space_written(prev, ds, &written,
2239 &comp, &uncomp);
2240 dsl_dataset_rele(prev, FTAG);
2241 if (err == 0) {
2242 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2243 written);
2244 }
2245 }
2246 }
2247
2248 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2249 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2250 ds->ds_phys->ds_compressed_bytes);
2251 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
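/*
 * Worked example (hypothetical sizes): 300M uncompressed and 120M
 * compressed yields ratio = 300M * 100 / 120M = 250, i.e. the
 * compression ratio scaled by 100, which userland displays as
 * "2.50x".  Datasets with no compressed bytes report 100 (1.00x).
 */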
2252
2253 if (ds->ds_phys->ds_next_snap_obj) {
2254 /*
2255 * This is a snapshot; override the dd's space used with
2256 * our unique space and compression ratio.
2257 */
2258 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2259 ds->ds_phys->ds_unique_bytes);
2260 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2261
2262 get_clones_stat(ds, nv);
2263 }
2264 }
2265
2266 void
2267 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2268 {
2269 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2270 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2271 stat->dds_guid = ds->ds_phys->ds_guid;
2272 if (ds->ds_phys->ds_next_snap_obj) {
2273 stat->dds_is_snapshot = B_TRUE;
2274 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2275 } else {
2276 stat->dds_is_snapshot = B_FALSE;
2277 stat->dds_num_clones = 0;
2278 }
2279
2280 /* clone origin is really a dsl_dir thing... */
2281 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2282 if (dsl_dir_is_clone(ds->ds_dir)) {
2283 dsl_dataset_t *ods;
2284
2285 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2286 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2287 dsl_dataset_name(ods, stat->dds_origin);
2288 dsl_dataset_drop_ref(ods, FTAG);
2289 } else {
2290 stat->dds_origin[0] = '\0';
2291 }
2292 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2293 }
2294
2295 uint64_t
2296 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2297 {
2298 return (ds->ds_fsid_guid);
2299 }
2300
2301 void
2302 dsl_dataset_space(dsl_dataset_t *ds,
2303 uint64_t *refdbytesp, uint64_t *availbytesp,
2304 uint64_t *usedobjsp, uint64_t *availobjsp)
2305 {
2306 *refdbytesp = ds->ds_phys->ds_used_bytes;
2307 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2308 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2309 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2310 if (ds->ds_quota != 0) {
2311 /*
2312 * Adjust available bytes according to refquota
2313 */
2314 if (*refdbytesp < ds->ds_quota)
2315 *availbytesp = MIN(*availbytesp,
2316 ds->ds_quota - *refdbytesp);
2317 else
2318 *availbytesp = 0;
2319 }
2320 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2321 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2322 }
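/*
 * Worked example (hypothetical numbers): with ds_used_bytes = 5G,
 * refquota = 8G, no refreservation, and 10G available in the
 * dsl_dir, *availbytesp = MIN(10G, 8G - 5G) = 3G; the refquota,
 * not the pool, is the binding limit here.
 */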
2323
2324 boolean_t
2325 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2326 {
2327 ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
2328
2329 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2330 dsl_pool_sync_context(dp));
2331 if (ds->ds_prev == NULL)
2332 return (B_FALSE);
2333 if (ds->ds_phys->ds_bp.blk_birth >
2334 ds->ds_prev->ds_phys->ds_creation_txg) {
2335 objset_t *os, *os_prev;
2336 /*
2337 * It may be that only the ZIL differs, because it was
2338 * reset in the head. Don't count that as being
2339 * modified.
2340 */
2341 if (dmu_objset_from_ds(ds, &os) != 0)
2342 return (B_TRUE);
2343 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2344 return (B_TRUE);
2345 return (bcmp(&os->os_phys->os_meta_dnode,
2346 &os_prev->os_phys->os_meta_dnode,
2347 sizeof (os->os_phys->os_meta_dnode)) != 0);
2348 }
2349 return (B_FALSE);
2350 }
2351
2352 /* ARGSUSED */
2353 static int
2354 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2355 {
2356 dsl_dataset_t *ds = arg1;
2357 char *newsnapname = arg2;
2358 dsl_dir_t *dd = ds->ds_dir;
2359 dsl_dataset_t *hds;
2360 uint64_t val;
2361 int err;
2362
2363 err = dsl_dataset_hold_obj(dd->dd_pool,
2364 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2365 if (err)
2366 return (err);
2367
2368 /* new name better not be in use */
2369 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2370 dsl_dataset_rele(hds, FTAG);
2371
2372 if (err == 0)
2373 err = EEXIST;
2374 else if (err == ENOENT)
2375 err = 0;
2376
2377 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2378 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2379 err = ENAMETOOLONG;
2380
2381 return (err);
2382 }
2383
2384 static void
2385 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2386 {
2387 dsl_dataset_t *ds = arg1;
2388 const char *newsnapname = arg2;
2389 dsl_dir_t *dd = ds->ds_dir;
2390 objset_t *mos = dd->dd_pool->dp_meta_objset;
2391 dsl_dataset_t *hds;
2392 int err;
2393
2394 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2395
2396 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2397 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2398
2399 VERIFY(0 == dsl_dataset_get_snapname(ds));
2400 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2401 ASSERT3U(err, ==, 0);
2402 mutex_enter(&ds->ds_lock);
2403 (void) strcpy(ds->ds_snapname, newsnapname);
2404 mutex_exit(&ds->ds_lock);
2405 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2406 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2407 ASSERT3U(err, ==, 0);
2408
2409 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2410 "dataset = %llu", ds->ds_object);
2411 dsl_dataset_rele(hds, FTAG);
2412 }
2413
2414 struct renamesnaparg {
2415 dsl_sync_task_group_t *dstg;
2416 char failed[MAXPATHLEN];
2417 char *oldsnap;
2418 char *newsnap;
2419 };
2420
2421 static int
2422 dsl_snapshot_rename_one(const char *name, void *arg)
2423 {
2424 struct renamesnaparg *ra = arg;
2425 dsl_dataset_t *ds = NULL;
2426 char *snapname;
2427 int err;
2428
2429 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2430 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2431
2432 /*
2433 * For recursive snapshot renames the parent won't be changing,
2434 * so we just pass name for both the to and from arguments.
2435 */
2436 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2437 if (err != 0) {
2438 strfree(snapname);
2439 return (err == ENOENT ? 0 : err);
2440 }
2441
2442 #ifdef _KERNEL
2443 /*
2444 * Each snapshot undergoing rename needs to be unmounted first.
2445 */
2446 (void) zfs_unmount_snap(snapname, NULL);
2447 #endif
2448 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2449 strfree(snapname);
2450 if (err != 0)
2451 return (err == ENOENT ? 0 : err);
2452
2453 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2454 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2455
2456 return (0);
2457 }
2458
2459 static int
2460 dsl_recursive_rename(char *oldname, const char *newname)
2461 {
2462 int err;
2463 struct renamesnaparg *ra;
2464 dsl_sync_task_t *dst;
2465 spa_t *spa;
2466 char *cp, *fsname = spa_strdup(oldname);
2467 int len = strlen(oldname) + 1;
2468
2469 /* truncate the snapshot name to get the fsname */
2470 cp = strchr(fsname, '@');
2471 *cp = '\0';
2472
2473 err = spa_open(fsname, &spa, FTAG);
2474 if (err) {
2475 kmem_free(fsname, len);
2476 return (err);
2477 }
2478 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2479 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2480
2481 ra->oldsnap = strchr(oldname, '@') + 1;
2482 ra->newsnap = strchr(newname, '@') + 1;
2483 *ra->failed = '\0';
2484
2485 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2486 DS_FIND_CHILDREN);
2487 kmem_free(fsname, len);
2488
2489 if (err == 0) {
2490 err = dsl_sync_task_group_wait(ra->dstg);
2491 }
2492
2493 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2494 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2495 dsl_dataset_t *ds = dst->dst_arg1;
2496 if (dst->dst_err) {
2497 dsl_dir_name(ds->ds_dir, ra->failed);
2498 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2499 (void) strlcat(ra->failed, ra->newsnap,
2500 sizeof (ra->failed));
2501 }
2502 dsl_dataset_rele(ds, ra->dstg);
2503 }
2504
2505 if (err)
2506 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2507
2508 dsl_sync_task_group_destroy(ra->dstg);
2509 kmem_free(ra, sizeof (struct renamesnaparg));
2510 spa_close(spa, FTAG);
2511 return (err);
2512 }
2513
2514 static int
2515 dsl_valid_rename(const char *oldname, void *arg)
2516 {
2517 int delta = *(int *)arg;
2518
2519 if (strlen(oldname) + delta >= MAXNAMELEN)
2520 return (ENAMETOOLONG);
2521
2522 return (0);
2523 }
2524
2525 #pragma weak dmu_objset_rename = dsl_dataset_rename
2526 int
2527 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2528 {
2529 dsl_dir_t *dd;
2530 dsl_dataset_t *ds;
2531 const char *tail;
2532 int err;
2533
2534 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2535 if (err)
2536 return (err);
2537
2538 if (tail == NULL) {
2539 int delta = strlen(newname) - strlen(oldname);
2540
2541 /* if we're growing, validate child name lengths */
2542 if (delta > 0)
2543 err = dmu_objset_find(oldname, dsl_valid_rename,
2544 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2545
2546 if (err == 0)
2547 err = dsl_dir_rename(dd, newname);
2548 dsl_dir_close(dd, FTAG);
2549 return (err);
2550 }
2551
2552 if (tail[0] != '@') {
2553 /* the name ended in a nonexistent component */
2554 dsl_dir_close(dd, FTAG);
2555 return (ENOENT);
2556 }
2557
2558 dsl_dir_close(dd, FTAG);
2559
2560 /* new name must be snapshot in same filesystem */
2561 tail = strchr(newname, '@');
2562 if (tail == NULL)
2563 return (EINVAL);
2564 tail++;
2565 if (strncmp(oldname, newname, tail - newname) != 0)
2566 return (EXDEV);
2567
2568 if (recursive) {
2569 err = dsl_recursive_rename(oldname, newname);
2570 } else {
2571 err = dsl_dataset_hold(oldname, FTAG, &ds);
2572 if (err)
2573 return (err);
2574
2575 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2576 dsl_dataset_snapshot_rename_check,
2577 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2578
2579 dsl_dataset_rele(ds, FTAG);
2580 }
2581
2582 return (err);
2583 }
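/*
 * Examples (hypothetical names): renaming "tank/fs@mon" to
 * "tank/fs@monday" is a snapshot rename handled by the sync task
 * above; renaming it to "tank/other@monday" fails with EXDEV, since
 * a snapshot cannot move to another filesystem; renaming "tank/fs"
 * itself is a dsl_dir rename handled by dsl_dir_rename().
 */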
2584
2585 struct promotenode {
2586 list_node_t link;
2587 dsl_dataset_t *ds;
2588 };
2589
2590 struct promotearg {
2591 list_t shared_snaps, origin_snaps, clone_snaps;
2592 dsl_dataset_t *origin_origin;
2593 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2594 char *err_ds;
2595 };
2596
2597 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2598
2599 static int
2600 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2601 {
2602 dsl_dataset_t *hds = arg1;
2603 struct promotearg *pa = arg2;
2604 struct promotenode *snap = list_head(&pa->shared_snaps);
2605 dsl_dataset_t *origin_ds = snap->ds;
2606 int err;
2607 uint64_t unused;
2608
2609 /* Check that it is a real clone */
2610 if (!dsl_dir_is_clone(hds->ds_dir))
2611 return (EINVAL);
2612
2613 /* Since this is so expensive, don't do the preliminary check */
2614 if (!dmu_tx_is_syncing(tx))
2615 return (0);
2616
2617 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2618 return (EXDEV);
2619
2620 /* compute origin's new unique space */
2621 snap = list_tail(&pa->clone_snaps);
2622 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2623 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2624 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2625 &pa->unique, &unused, &unused);
2626
2627 /*
2628 * Walk the snapshots that we are moving
2629 *
2630 * Compute space to transfer. Consider the incremental changes
2631 * to used for each snapshot:
2632 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2633 * So each snapshot gave birth to:
2634 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2635 * So a sequence would look like:
2636 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2637 * Which simplifies to:
2638 * uN + kN + k(N-1) + ... + k1 + k0
2639 * Note however, if we stop before we reach the ORIGIN we get:
2640 * uN + kN + k(N-1) + ... + kM - u(M-1)
2641 */
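/*
 * Worked example (hypothetical units): for snapshots s0..s2 with
 * used u0=10, u1=12, u2=15 and killed k0=0, k1=3, k2=2, the blocks
 * born are (15-12+2) + (12-10+3) + (10-0+0) = 20, which matches
 * u2 + k2 + k1 + k0 = 15 + 2 + 3 + 0 = 20.
 */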
2642 pa->used = origin_ds->ds_phys->ds_used_bytes;
2643 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2644 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2645 for (snap = list_head(&pa->shared_snaps); snap;
2646 snap = list_next(&pa->shared_snaps, snap)) {
2647 uint64_t val, dlused, dlcomp, dluncomp;
2648 dsl_dataset_t *ds = snap->ds;
2649
2650 /* Check that the snapshot name does not conflict */
2651 VERIFY(0 == dsl_dataset_get_snapname(ds));
2652 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2653 if (err == 0) {
2654 err = EEXIST;
2655 goto out;
2656 }
2657 if (err != ENOENT)
2658 goto out;
2659
2660 /* The very first snapshot does not have a deadlist */
2661 if (ds->ds_phys->ds_prev_snap_obj == 0)
2662 continue;
2663
2664 dsl_deadlist_space(&ds->ds_deadlist,
2665 &dlused, &dlcomp, &dluncomp);
2666 pa->used += dlused;
2667 pa->comp += dlcomp;
2668 pa->uncomp += dluncomp;
2669 }
2670
2671 /*
2672 * If we are a clone of a clone then we never reached ORIGIN,
2673 * so we need to subtract out the clone origin's used space.
2674 */
2675 if (pa->origin_origin) {
2676 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2677 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2678 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2679 }
2680
2681 /* Check that there is enough space here */
2682 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2683 pa->used);
2684 if (err)
2685 return (err);
2686
2687 /*
2688 * Compute the amounts of space that will be used by snapshots
2689 * after the promotion (for both origin and clone). For each,
2690 * it is the amount of space that will be on all of their
2691 * deadlists (that was not born before their new origin).
2692 */
2693 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2694 uint64_t space;
2695
2696 /*
2697 * Note, typically this will not be a clone of a clone,
2698 * so dd_origin_txg will be < TXG_INITIAL, so
2699 * these snaplist_space() -> dsl_deadlist_space_range()
2700 * calls will be fast because they do not have to
2701 * iterate over all bps.
2702 */
2703 snap = list_head(&pa->origin_snaps);
2704 err = snaplist_space(&pa->shared_snaps,
2705 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2706 if (err)
2707 return (err);
2708
2709 err = snaplist_space(&pa->clone_snaps,
2710 snap->ds->ds_dir->dd_origin_txg, &space);
2711 if (err)
2712 return (err);
2713 pa->cloneusedsnap += space;
2714 }
2715 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2716 err = snaplist_space(&pa->origin_snaps,
2717 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2718 if (err)
2719 return (err);
2720 }
2721
2722 return (0);
2723 out:
2724 pa->err_ds = snap->ds->ds_snapname;
2725 return (err);
2726 }
2727
2728 static void
2729 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2730 {
2731 dsl_dataset_t *hds = arg1;
2732 struct promotearg *pa = arg2;
2733 struct promotenode *snap = list_head(&pa->shared_snaps);
2734 dsl_dataset_t *origin_ds = snap->ds;
2735 dsl_dataset_t *origin_head;
2736 dsl_dir_t *dd = hds->ds_dir;
2737 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2738 dsl_dir_t *odd = NULL;
2739 uint64_t oldnext_obj;
2740 int64_t delta;
2741
2742 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2743
2744 snap = list_head(&pa->origin_snaps);
2745 origin_head = snap->ds;
2746
2747 /*
2748 * We need to explicitly open odd, since origin_ds's dd will be
2749 * changing.
2750 */
2751 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2752 NULL, FTAG, &odd));
2753
2754 /* change origin's next snap */
2755 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2756 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2757 snap = list_tail(&pa->clone_snaps);
2758 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2759 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2760
2761 /* change the origin's next clone */
2762 if (origin_ds->ds_phys->ds_next_clones_obj) {
2763 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2764 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2765 origin_ds->ds_phys->ds_next_clones_obj,
2766 oldnext_obj, tx));
2767 }
2768
2769 /* change origin */
2770 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2771 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2772 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2773 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2774 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2775 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2776 origin_head->ds_dir->dd_origin_txg =
2777 origin_ds->ds_phys->ds_creation_txg;
2778
2779 /* change dd_clone entries */
2780 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2781 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2782 odd->dd_phys->dd_clones, hds->ds_object, tx));
2783 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2784 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2785 hds->ds_object, tx));
2786
2787 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2788 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2789 origin_head->ds_object, tx));
2790 if (dd->dd_phys->dd_clones == 0) {
2791 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2792 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2793 }
2794 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2795 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2796
2797 }
2798
2799 /* move snapshots to this dir */
2800 for (snap = list_head(&pa->shared_snaps); snap;
2801 snap = list_next(&pa->shared_snaps, snap)) {
2802 dsl_dataset_t *ds = snap->ds;
2803
2804 /* unregister props as dsl_dir is changing */
2805 if (ds->ds_objset) {
2806 dmu_objset_evict(ds->ds_objset);
2807 ds->ds_objset = NULL;
2808 }
2809 /* move snap name entry */
2810 VERIFY(0 == dsl_dataset_get_snapname(ds));
2811 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2812 ds->ds_snapname, tx));
2813 VERIFY(0 == zap_add(dp->dp_meta_objset,
2814 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2815 8, 1, &ds->ds_object, tx));
2816
2817 /* change containing dsl_dir */
2818 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2819 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2820 ds->ds_phys->ds_dir_obj = dd->dd_object;
2821 ASSERT3P(ds->ds_dir, ==, odd);
2822 dsl_dir_close(ds->ds_dir, ds);
2823 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2824 NULL, ds, &ds->ds_dir));
2825
2826 /* move any clone references */
2827 if (ds->ds_phys->ds_next_clones_obj &&
2828 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2829 zap_cursor_t zc;
2830 zap_attribute_t za;
2831
2832 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2833 ds->ds_phys->ds_next_clones_obj);
2834 zap_cursor_retrieve(&zc, &za) == 0;
2835 zap_cursor_advance(&zc)) {
2836 dsl_dataset_t *cnds;
2837 uint64_t o;
2838
2839 if (za.za_first_integer == oldnext_obj) {
2840 /*
2841 * We've already moved the
2842 * origin's reference.
2843 */
2844 continue;
2845 }
2846
2847 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2848 za.za_first_integer, FTAG, &cnds));
2849 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2850
2851 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2852 odd->dd_phys->dd_clones, o, tx), ==, 0);
2853 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2854 dd->dd_phys->dd_clones, o, tx), ==, 0);
2855 dsl_dataset_rele(cnds, FTAG);
2856 }
2857 zap_cursor_fini(&zc);
2858 }
2859
2860 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2861 }
2862
2863 /*
2864 * Change space accounting.
2865 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2866 * both be valid, or both be 0 (resulting in delta == 0). This
2867 * is true for each of {clone,origin} independently.
2868 */
2869
2870 delta = pa->cloneusedsnap -
2871 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2872 ASSERT3S(delta, >=, 0);
2873 ASSERT3U(pa->used, >=, delta);
2874 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2875 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2876 pa->used - delta, pa->comp, pa->uncomp, tx);
2877
2878 delta = pa->originusedsnap -
2879 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2880 ASSERT3S(delta, <=, 0);
2881 ASSERT3U(pa->used, >=, -delta);
2882 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2883 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2884 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2885
2886 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2887
2888 /* log history record */
2889 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2890 "dataset = %llu", hds->ds_object);
2891
2892 dsl_dir_close(odd, FTAG);
2893 }
2894
2895 static char *snaplist_tag = "snaplist";
2896 /*
2897 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2898 * (exclusive) and last_obj (inclusive). The list will be in reverse
2899 * order (last_obj will be the list_head()). If first_obj == 0, do all
2900 * snapshots back to this dataset's origin.
2901 */
2902 static int
2903 snaplist_make(dsl_pool_t *dp, boolean_t own,
2904 uint64_t first_obj, uint64_t last_obj, list_t *l)
2905 {
2906 uint64_t obj = last_obj;
2907
2908 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2909
2910 list_create(l, sizeof (struct promotenode),
2911 offsetof(struct promotenode, link));
2912
2913 while (obj != first_obj) {
2914 dsl_dataset_t *ds;
2915 struct promotenode *snap;
2916 int err;
2917
2918 if (own) {
2919 err = dsl_dataset_own_obj(dp, obj,
2920 0, snaplist_tag, &ds);
2921 if (err == 0)
2922 dsl_dataset_make_exclusive(ds, snaplist_tag);
2923 } else {
2924 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2925 }
2926 if (err == ENOENT) {
2927 /* lost race with snapshot destroy */
2928 struct promotenode *last = list_tail(l);
2929 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2930 obj = last->ds->ds_phys->ds_prev_snap_obj;
2931 continue;
2932 } else if (err) {
2933 return (err);
2934 }
2935
2936 if (first_obj == 0)
2937 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2938
2939 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2940 snap->ds = ds;
2941 list_insert_tail(l, snap);
2942 obj = ds->ds_phys->ds_prev_snap_obj;
2943 }
2944
2945 return (0);
2946 }
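/*
 * Usage sketch (mirrors dsl_dataset_promote(); assumes the caller
 * holds dp_config_rwlock, as asserted above):
 *
 *	list_t snaps;
 *	struct promotenode *sn;
 *	int err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &snaps);
 *	if (err == 0) {
 *		for (sn = list_head(&snaps); sn != NULL;
 *		    sn = list_next(&snaps, sn))
 *			... use sn->ds here (newest snapshot first) ...
 *	}
 *	snaplist_destroy(&snaps, B_FALSE);
 *
 * snaplist_destroy() must be passed the same 'own' flag that was
 * given to snaplist_make(), and it tolerates a partially built list.
 */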
2947
2948 static int
2949 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2950 {
2951 struct promotenode *snap;
2952
2953 *spacep = 0;
2954 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2955 uint64_t used, comp, uncomp;
2956 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2957 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2958 *spacep += used;
2959 }
2960 return (0);
2961 }
2962
2963 static void
2964 snaplist_destroy(list_t *l, boolean_t own)
2965 {
2966 struct promotenode *snap;
2967
2968 if (!l || !list_link_active(&l->list_head))
2969 return;
2970
2971 while ((snap = list_tail(l)) != NULL) {
2972 list_remove(l, snap);
2973 if (own)
2974 dsl_dataset_disown(snap->ds, snaplist_tag);
2975 else
2976 dsl_dataset_rele(snap->ds, snaplist_tag);
2977 kmem_free(snap, sizeof (struct promotenode));
2978 }
2979 list_destroy(l);
2980 }
2981
2982 /*
2983 * Promote a clone. Nomenclature note:
2984 * "clone" or "cds": the original clone which is being promoted
2985 * "origin" or "ods": the snapshot which is originally clone's origin
2986 * "origin head" or "ohds": the dataset which is the head
2987 * (filesystem/volume) for the origin
2988 * "origin origin": the origin of the origin's filesystem (typically
2989 * NULL, indicating that the clone is not a clone of a clone).
2990 */
2991 int
2992 dsl_dataset_promote(const char *name, char *conflsnap)
2993 {
2994 dsl_dataset_t *ds;
2995 dsl_dir_t *dd;
2996 dsl_pool_t *dp;
2997 dmu_object_info_t doi;
2998 struct promotearg pa;
2999 struct promotenode *snap;
3000 int err;
3001
3002 bzero(&pa, sizeof (struct promotearg));
3003 err = dsl_dataset_hold(name, FTAG, &ds);
3004 if (err)
3005 return (err);
3006 dd = ds->ds_dir;
3007 dp = dd->dd_pool;
3008
3009 err = dmu_object_info(dp->dp_meta_objset,
3010 ds->ds_phys->ds_snapnames_zapobj, &doi);
3011 if (err) {
3012 dsl_dataset_rele(ds, FTAG);
3013 return (err);
3014 }
3015
3016 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3017 dsl_dataset_rele(ds, FTAG);
3018 return (EINVAL);
3019 }
3020
3021 /*
3022 * We are going to inherit all the snapshots taken before our
3023 * origin (i.e., our new origin will be our parent's origin).
3024 * Take ownership of them so that we can rename them into our
3025 * namespace.
3026 */
3027 rw_enter(&dp->dp_config_rwlock, RW_READER);
3028
3029 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3030 &pa.shared_snaps);
3031 if (err != 0)
3032 goto out;
3033
3034 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3035 if (err != 0)
3036 goto out;
3037
3038 snap = list_head(&pa.shared_snaps);
3039 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3040 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3041 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3042 if (err != 0)
3043 goto out;
3044
3045 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3046 err = dsl_dataset_hold_obj(dp,
3047 snap->ds->ds_dir->dd_phys->dd_origin_obj,
3048 FTAG, &pa.origin_origin);
3049 if (err != 0)
3050 goto out;
3051 }
3052
3053 out:
3054 rw_exit(&dp->dp_config_rwlock);
3055
3056 /*
3057 * Add in 128x the snapnames zapobj size, since we will be moving
3058 * a bunch of snapnames to the promoted ds, and dirtying their
3059 * bonus buffers.
3060 */
3061 if (err == 0) {
3062 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3063 dsl_dataset_promote_sync, ds, &pa,
3064 2 + 2 * doi.doi_physical_blocks_512);
3065 if (err && pa.err_ds && conflsnap)
3066 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3067 }
3068
3069 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3070 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3071 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3072 if (pa.origin_origin)
3073 dsl_dataset_rele(pa.origin_origin, FTAG);
3074 dsl_dataset_rele(ds, FTAG);
3075 return (err);
3076 }
3077
3078 struct cloneswaparg {
3079 dsl_dataset_t *cds; /* clone dataset */
3080 dsl_dataset_t *ohds; /* origin's head dataset */
3081 boolean_t force;
3082 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3083 };
3084
3085 /* ARGSUSED */
3086 static int
3087 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3088 {
3089 struct cloneswaparg *csa = arg1;
3090
3091 /* they should both be heads */
3092 if (dsl_dataset_is_snapshot(csa->cds) ||
3093 dsl_dataset_is_snapshot(csa->ohds))
3094 return (EINVAL);
3095
3096 /* the branch point should be just before them */
3097 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3098 return (EINVAL);
3099
3100 /* cds should be the clone (unless they are unrelated) */
3101 if (csa->cds->ds_prev != NULL &&
3102 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3103 csa->ohds->ds_object !=
3104 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3105 return (EINVAL);
3106
3107 /* the clone should be a child of the origin */
3108 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3109 return (EINVAL);
3110
3111 /* ohds shouldn't be modified unless 'force' */
3112 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3113 return (ETXTBSY);
3114
3115 /* adjust amount of any unconsumed refreservation */
3116 csa->unused_refres_delta =
3117 (int64_t)MIN(csa->ohds->ds_reserved,
3118 csa->ohds->ds_phys->ds_unique_bytes) -
3119 (int64_t)MIN(csa->ohds->ds_reserved,
3120 csa->cds->ds_phys->ds_unique_bytes);
3121
3122 if (csa->unused_refres_delta > 0 &&
3123 csa->unused_refres_delta >
3124 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3125 return (ENOSPC);
3126
3127 if (csa->ohds->ds_quota != 0 &&
3128 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3129 return (EDQUOT);
3130
3131 return (0);
3132 }
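/*
 * Worked example (hypothetical numbers): with refreservation = 10G,
 * origin head unique = 2G and clone unique = 6G, the delta is
 * MIN(10G, 2G) - MIN(10G, 6G) = -4G: after the swap 4G more of the
 * reservation is consumed by unique data, so 4G less is charged as
 * unused refreservation.  Were the clone's unique only 1G, the delta
 * would be +1G, and that much free space must exist, per the ENOSPC
 * check above.
 */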
3133
3134 /* ARGSUSED */
3135 static void
3136 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3137 {
3138 struct cloneswaparg *csa = arg1;
3139 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3140
3141 ASSERT(csa->cds->ds_reserved == 0);
3142 ASSERT(csa->ohds->ds_quota == 0 ||
3143 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3144
3145 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3146 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3147
3148 if (csa->cds->ds_objset != NULL) {
3149 dmu_objset_evict(csa->cds->ds_objset);
3150 csa->cds->ds_objset = NULL;
3151 }
3152
3153 if (csa->ohds->ds_objset != NULL) {
3154 dmu_objset_evict(csa->ohds->ds_objset);
3155 csa->ohds->ds_objset = NULL;
3156 }
3157
3158 /*
3159 * Reset origin's unique bytes, if it exists.
3160 */
3161 if (csa->cds->ds_prev) {
3162 dsl_dataset_t *origin = csa->cds->ds_prev;
3163 uint64_t comp, uncomp;
3164
3165 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3166 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3167 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3168 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3169 }
3170
3171 /* swap blkptrs */
3172 {
3173 blkptr_t tmp;
3174 tmp = csa->ohds->ds_phys->ds_bp;
3175 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3176 csa->cds->ds_phys->ds_bp = tmp;
3177 }
3178
3179 /* set dd_*_bytes */
3180 {
3181 int64_t dused, dcomp, duncomp;
3182 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3183 uint64_t odl_used, odl_comp, odl_uncomp;
3184
3185 ASSERT3U(csa->cds->ds_dir->dd_phys->
3186 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3187
3188 dsl_deadlist_space(&csa->cds->ds_deadlist,
3189 &cdl_used, &cdl_comp, &cdl_uncomp);
3190 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3191 &odl_used, &odl_comp, &odl_uncomp);
3192
3193 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3194 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3195 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3196 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3197 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3198 cdl_uncomp -
3199 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3200
3201 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3202 dused, dcomp, duncomp, tx);
3203 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3204 -dused, -dcomp, -duncomp, tx);
3205
3206 /*
3207 * The difference in the space used by snapshots is the
3208 * difference in snapshot space due to the head's
3209 * deadlist (since that's the only thing that's
3210 * changing that affects the snapused).
3211 */
3212 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3213 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3214 &cdl_used, &cdl_comp, &cdl_uncomp);
3215 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3216 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3217 &odl_used, &odl_comp, &odl_uncomp);
3218 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3219 DD_USED_HEAD, DD_USED_SNAP, tx);
3220 }
3221
3222 /* swap ds_*_bytes */
3223 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3224 csa->cds->ds_phys->ds_used_bytes);
3225 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3226 csa->cds->ds_phys->ds_compressed_bytes);
3227 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3228 csa->cds->ds_phys->ds_uncompressed_bytes);
3229 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3230 csa->cds->ds_phys->ds_unique_bytes);
3231
3232 /* apply any parent delta for change in unconsumed refreservation */
3233 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3234 csa->unused_refres_delta, 0, 0, tx);
3235
3236 /*
3237 * Swap deadlists.
3238 */
3239 dsl_deadlist_close(&csa->cds->ds_deadlist);
3240 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3241 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3242 csa->cds->ds_phys->ds_deadlist_obj);
3243 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3244 csa->cds->ds_phys->ds_deadlist_obj);
3245 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3246 csa->ohds->ds_phys->ds_deadlist_obj);
3247
3248 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3249 }
3250
3251 /*
3252 * Swap 'clone' with its origin head datasets. Used at the end of "zfs
3253 * recv" into an existing fs to swizzle the file system to the new
3254 * version, and by "zfs rollback". Can also be used to swap two
3255 * independent head datasets if neither has any snapshots.
3256 */
3257 int
3258 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3259 boolean_t force)
3260 {
3261 struct cloneswaparg csa;
3262 int error;
3263
3264 ASSERT(clone->ds_owner);
3265 ASSERT(origin_head->ds_owner);
3266 retry:
3267 /*
3268 * Need exclusive access for the swap. If we're swapping these
3269 * datasets back after an error, we already hold the locks.
3270 */
3271 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3272 rw_enter(&clone->ds_rwlock, RW_WRITER);
3273 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3274 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3275 rw_exit(&clone->ds_rwlock);
3276 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3277 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3278 rw_exit(&origin_head->ds_rwlock);
3279 goto retry;
3280 }
3281 }
3282 csa.cds = clone;
3283 csa.ohds = origin_head;
3284 csa.force = force;
3285 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3286 dsl_dataset_clone_swap_check,
3287 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3288 return (error);
3289 }
3290
3291 /*
3292 * Given a pool name and a dataset object number in that pool,
3293 * return the name of that dataset.
3294 */
3295 int
3296 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3297 {
3298 spa_t *spa;
3299 dsl_pool_t *dp;
3300 dsl_dataset_t *ds;
3301 int error;
3302
3303 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3304 return (error);
3305 dp = spa_get_dsl(spa);
3306 rw_enter(&dp->dp_config_rwlock, RW_READER);
3307 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3308 dsl_dataset_name(ds, buf);
3309 dsl_dataset_rele(ds, FTAG);
3310 }
3311 rw_exit(&dp->dp_config_rwlock);
3312 spa_close(spa, FTAG);
3313
3314 return (error);
3315 }
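/*
 * Usage sketch (hypothetical pool name and object number; error
 * handling elided):
 *
 *	char name[MAXNAMELEN];
 *	if (dsl_dsobj_to_dsname("tank", 137, name) == 0)
 *		zfs_dbgmsg("object 137 is %s", name);
 *
 * 'buf' receives the full dataset name on success, so it must be at
 * least MAXNAMELEN bytes.
 */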
3316
3317 int
3318 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3319 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3320 {
3321 int error = 0;
3322
3323 ASSERT3S(asize, >, 0);
3324
3325 /*
3326 * *ref_rsrv is the portion of asize that will come from any
3327 * unconsumed refreservation space.
3328 */
3329 *ref_rsrv = 0;
3330
3331 mutex_enter(&ds->ds_lock);
3332 /*
3333 * Make a space adjustment for reserved bytes.
3334 */
3335 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3336 ASSERT3U(*used, >=,
3337 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3338 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3339 *ref_rsrv =
3340 asize - MIN(asize, parent_delta(ds, asize + inflight));
3341 }
3342
3343 if (!check_quota || ds->ds_quota == 0) {
3344 mutex_exit(&ds->ds_lock);
3345 return (0);
3346 }
3347 /*
3348 * If they are requesting more space, and our current estimate
3349 * is over quota, they get to try again unless the actual
3350 * on-disk is over quota and there are no pending changes (which
3351 * may free up space for us).
3352 */
3353 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3354 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3355 error = ERESTART;
3356 else
3357 error = EDQUOT;
3358
3359 DMU_TX_STAT_BUMP(dmu_tx_quota);
3360 }
3361 mutex_exit(&ds->ds_lock);
3362
3363 return (error);
3364 }
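/*
 * Worked example (hypothetical numbers): with refreservation = 10G,
 * ds_unique_bytes = 4G, asize = 1G and no inflight writes, *used is
 * reduced by the 6G of unconsumed reservation already charged, and
 * parent_delta() returns MAX(5G, 10G) - MAX(4G, 10G) = 0, so
 * *ref_rsrv = 1G - MIN(1G, 0) = 1G: the entire write is absorbed by
 * the unconsumed refreservation.
 */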
3365
3366 /* ARGSUSED */
3367 static int
3368 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3369 {
3370 dsl_dataset_t *ds = arg1;
3371 dsl_prop_setarg_t *psa = arg2;
3372 int err;
3373
3374 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3375 return (ENOTSUP);
3376
3377 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3378 return (err);
3379
3380 if (psa->psa_effective_value == 0)
3381 return (0);
3382
3383 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3384 psa->psa_effective_value < ds->ds_reserved)
3385 return (ENOSPC);
3386
3387 return (0);
3388 }
3389
3390 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3391
3392 void
3393 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3394 {
3395 dsl_dataset_t *ds = arg1;
3396 dsl_prop_setarg_t *psa = arg2;
3397 uint64_t effective_value = psa->psa_effective_value;
3398
3399 dsl_prop_set_sync(ds, psa, tx);
3400 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3401
3402 if (ds->ds_quota != effective_value) {
3403 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3404 ds->ds_quota = effective_value;
3405
3406 spa_history_log_internal(LOG_DS_REFQUOTA,
3407 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3408 (longlong_t)ds->ds_quota, ds->ds_object);
3409 }
3410 }
3411
3412 int
3413 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3414 {
3415 dsl_dataset_t *ds;
3416 dsl_prop_setarg_t psa;
3417 int err;
3418
3419 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3420
3421 err = dsl_dataset_hold(dsname, FTAG, &ds);
3422 if (err)
3423 return (err);
3424
3425 /*
3426 * If someone removes a file, then tries to set the quota, we
3427 * want to make sure the file freeing takes effect.
3428 */
3429 txg_wait_open(ds->ds_dir->dd_pool, 0);
3430
3431 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3432 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3433 ds, &psa, 0);
3434
3435 dsl_dataset_rele(ds, FTAG);
3436 return (err);
3437 }
3438
3439 static int
3440 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3441 {
3442 dsl_dataset_t *ds = arg1;
3443 dsl_prop_setarg_t *psa = arg2;
3444 uint64_t effective_value;
3445 uint64_t unique;
3446 int err;
3447
3448 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3449 SPA_VERSION_REFRESERVATION)
3450 return (ENOTSUP);
3451
3452 if (dsl_dataset_is_snapshot(ds))
3453 return (EINVAL);
3454
3455 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3456 return (err);
3457
3458 effective_value = psa->psa_effective_value;
3459
3460 /*
3461 * If we are doing the preliminary check in open context, the
3462 * space estimates may be inaccurate.
3463 */
3464 if (!dmu_tx_is_syncing(tx))
3465 return (0);
3466
3467 mutex_enter(&ds->ds_lock);
3468 if (!DS_UNIQUE_IS_ACCURATE(ds))
3469 dsl_dataset_recalc_head_uniq(ds);
3470 unique = ds->ds_phys->ds_unique_bytes;
3471 mutex_exit(&ds->ds_lock);
3472
3473 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3474 uint64_t delta = MAX(unique, effective_value) -
3475 MAX(unique, ds->ds_reserved);
3476
3477 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3478 return (ENOSPC);
3479 if (ds->ds_quota > 0 &&
3480 effective_value > ds->ds_quota)
3481 return (ENOSPC);
3482 }
3483
3484 return (0);
3485 }
3486
3487 static void
3488 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3489 {
3490 dsl_dataset_t *ds = arg1;
3491 dsl_prop_setarg_t *psa = arg2;
3492 uint64_t effective_value = psa->psa_effective_value;
3493 uint64_t unique;
3494 int64_t delta;
3495
3496 dsl_prop_set_sync(ds, psa, tx);
3497 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3498
3499 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3500
3501 mutex_enter(&ds->ds_dir->dd_lock);
3502 mutex_enter(&ds->ds_lock);
3503 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3504 unique = ds->ds_phys->ds_unique_bytes;
3505 delta = MAX(0, (int64_t)(effective_value - unique)) -
3506 MAX(0, (int64_t)(ds->ds_reserved - unique));
3507 ds->ds_reserved = effective_value;
3508 mutex_exit(&ds->ds_lock);
3509
3510 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3511 mutex_exit(&ds->ds_dir->dd_lock);
3512
3513 spa_history_log_internal(LOG_DS_REFRESERV,
3514 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3515 (longlong_t)effective_value, ds->ds_object);
3516 }
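/*
 * Worked example (hypothetical numbers): raising refreservation from
 * 5G to 8G on a dataset with 2G of unique data gives
 * delta = MAX(0, 8G - 2G) - MAX(0, 5G - 2G) = 3G, so 3G more is
 * charged to DD_USED_REFRSRV above.
 */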
3517
3518 int
3519 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3520 uint64_t reservation)
3521 {
3522 dsl_dataset_t *ds;
3523 dsl_prop_setarg_t psa;
3524 int err;
3525
3526 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3527 &reservation);
3528
3529 err = dsl_dataset_hold(dsname, FTAG, &ds);
3530 if (err)
3531 return (err);
3532
3533 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3534 dsl_dataset_set_reservation_check,
3535 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3536
3537 dsl_dataset_rele(ds, FTAG);
3538 return (err);
3539 }
3540
3541 typedef struct zfs_hold_cleanup_arg {
3542 dsl_pool_t *dp;
3543 uint64_t dsobj;
3544 char htag[MAXNAMELEN];
3545 } zfs_hold_cleanup_arg_t;
3546
3547 static void
3548 dsl_dataset_user_release_onexit(void *arg)
3549 {
3550 zfs_hold_cleanup_arg_t *ca = arg;
3551
3552 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3553 B_TRUE);
3554 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3555 }
3556
3557 void
3558 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3559 minor_t minor)
3560 {
3561 zfs_hold_cleanup_arg_t *ca;
3562
3563 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3564 ca->dp = ds->ds_dir->dd_pool;
3565 ca->dsobj = ds->ds_object;
3566 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3567 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3568 dsl_dataset_user_release_onexit, ca, NULL));
3569 }
3570
3571 /*
3572 * If you add new checks here, you may need to add
3573 * additional checks to the "temporary" case in
3574 * snapshot_check() in dmu_objset.c.
3575 */
3576 static int
3577 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3578 {
3579 dsl_dataset_t *ds = arg1;
3580 struct dsl_ds_holdarg *ha = arg2;
3581 char *htag = ha->htag;
3582 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t tmp; /* scratch value buffer for the zap_lookup() below */
3583 int error = 0;
3584
3585 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3586 return (ENOTSUP);
3587
3588 if (!dsl_dataset_is_snapshot(ds))
3589 return (EINVAL);
3590
3591 /* tags must be unique */
3592 mutex_enter(&ds->ds_lock);
3593 if (ds->ds_phys->ds_userrefs_obj) {
3594 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3595 8, 1, &tmp);
3596 if (error == 0)
3597 error = EEXIST;
3598 else if (error == ENOENT)
3599 error = 0;
3600 }
3601 mutex_exit(&ds->ds_lock);
3602
3603 if (error == 0 && ha->temphold &&
3604 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3605 error = E2BIG;
3606
3607 return (error);
3608 }
3609
3610 void
3611 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3612 {
3613 dsl_dataset_t *ds = arg1;
3614 struct dsl_ds_holdarg *ha = arg2;
3615 char *htag = ha->htag;
3616 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3617 objset_t *mos = dp->dp_meta_objset;
3618 uint64_t now = gethrestime_sec();
3619 uint64_t zapobj;
3620
3621 mutex_enter(&ds->ds_lock);
3622 if (ds->ds_phys->ds_userrefs_obj == 0) {
3623 /*
3624 * This is the first user hold for this dataset. Create
3625 * the userrefs zap object.
3626 */
3627 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3628 zapobj = ds->ds_phys->ds_userrefs_obj =
3629 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3630 } else {
3631 zapobj = ds->ds_phys->ds_userrefs_obj;
3632 }
3633 ds->ds_userrefs++;
3634 mutex_exit(&ds->ds_lock);
3635
3636 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3637
3638 if (ha->temphold) {
3639 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3640 htag, &now, tx));
3641 }
3642
3643 spa_history_log_internal(LOG_DS_USER_HOLD,
3644 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3645 (int)ha->temphold, ds->ds_object);
3646 }
3647
3648 static int
3649 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3650 {
3651 struct dsl_ds_holdarg *ha = arg;
3652 dsl_dataset_t *ds;
3653 int error;
3654 char *name;
3655
3656 /* alloc a buffer to hold dsname@snapname plus the terminating NUL */
3657 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3658 error = dsl_dataset_hold(name, ha->dstg, &ds);
3659 strfree(name);
3660 if (error == 0) {
3661 ha->gotone = B_TRUE;
3662 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3663 dsl_dataset_user_hold_sync, ds, ha, 0);
3664 } else if (error == ENOENT && ha->recursive) {
3665 error = 0;
3666 } else {
3667 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3668 }
3669 return (error);
3670 }
3671
3672 int
3673 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3674 boolean_t temphold)
3675 {
3676 struct dsl_ds_holdarg *ha;
3677 int error;
3678
3679 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3680 ha->htag = htag;
3681 ha->temphold = temphold;
3682 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3683 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3684 ds, ha, 0);
3685 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3686
3687 return (error);
3688 }
3689
3690 int
3691 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3692 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3693 {
3694 struct dsl_ds_holdarg *ha;
3695 dsl_sync_task_t *dst;
3696 spa_t *spa;
3697 int error;
3698 minor_t minor = 0;
3699
3700 if (cleanup_fd != -1) {
3701 /* Currently we only support cleanup-on-exit of tempholds. */
3702 if (!temphold)
3703 return (EINVAL);
3704 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3705 if (error)
3706 return (error);
3707 }
3708
3709 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3710
3711 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3712
3713 error = spa_open(dsname, &spa, FTAG);
3714 if (error) {
3715 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3716 if (cleanup_fd != -1)
3717 zfs_onexit_fd_rele(cleanup_fd);
3718 return (error);
3719 }
3720
3721 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3722 ha->htag = htag;
3723 ha->snapname = snapname;
3724 ha->recursive = recursive;
3725 ha->temphold = temphold;
3726
3727 if (recursive) {
3728 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3729 ha, DS_FIND_CHILDREN);
3730 } else {
3731 error = dsl_dataset_user_hold_one(dsname, ha);
3732 }
3733 if (error == 0)
3734 error = dsl_sync_task_group_wait(ha->dstg);
3735
3736 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3737 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3738 dsl_dataset_t *ds = dst->dst_arg1;
3739
3740 if (dst->dst_err) {
3741 dsl_dataset_name(ds, ha->failed);
3742 *strchr(ha->failed, '@') = '\0';
3743 } else if (error == 0 && minor != 0 && temphold) {
3744 /*
3745 * If this hold is to be released upon process exit,
3746 * register that action now.
3747 */
3748 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3749 }
3750 dsl_dataset_rele(ds, ha->dstg);
3751 }
3752
3753 if (error == 0 && recursive && !ha->gotone)
3754 error = ENOENT;
3755
3756 if (error)
3757 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3758
3759 dsl_sync_task_group_destroy(ha->dstg);
3760
3761 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3762 spa_close(spa, FTAG);
3763 if (cleanup_fd != -1)
3764 zfs_onexit_fd_rele(cleanup_fd);
3765 return (error);
3766 }
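/*
 * Editor's sketch of a caller (hypothetical names). dsname must be a
 * writable buffer: on failure it is overwritten with the name of the
 * dataset that could not be held (via ha->failed above):
 *
 *     char dsname[MAXPATHLEN] = "pool/fs";
 *
 *     error = dsl_dataset_user_hold(dsname, "snap1", "my-tag",
 *         B_TRUE, B_TRUE, cleanup_fd);
 *
 * With temphold == B_TRUE and a valid cleanup_fd, the hold is registered
 * for release when that fd's process exits (zfs_onexit); cleanup_fd == -1
 * skips the registration, and a cleanup_fd with temphold == B_FALSE
 * returns EINVAL.
 */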
3767
3768 struct dsl_ds_releasearg {
3769 dsl_dataset_t *ds;
3770 const char *htag;
3771 boolean_t own; /* do we own or just hold ds? */
3772 };
3773
3774 static int
3775 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3776 boolean_t *might_destroy)
3777 {
3778 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3779 uint64_t zapobj;
3780 uint64_t tmp;
3781 int error;
3782
3783 *might_destroy = B_FALSE;
3784
3785 mutex_enter(&ds->ds_lock);
3786 zapobj = ds->ds_phys->ds_userrefs_obj;
3787 if (zapobj == 0) {
3788 /* The tag can't possibly exist */
3789 mutex_exit(&ds->ds_lock);
3790 return (ESRCH);
3791 }
3792
3793 /* Make sure the tag exists */
3794 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3795 if (error) {
3796 mutex_exit(&ds->ds_lock);
3797 if (error == ENOENT)
3798 error = ESRCH;
3799 return (error);
3800 }
3801
3802 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3803 DS_IS_DEFER_DESTROY(ds))
3804 *might_destroy = B_TRUE;
3805
3806 mutex_exit(&ds->ds_lock);
3807 return (0);
3808 }
3809
3810 static int
3811 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3812 {
3813 struct dsl_ds_releasearg *ra = arg1;
3814 dsl_dataset_t *ds = ra->ds;
3815 boolean_t might_destroy;
3816 int error;
3817
3818 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3819 return (ENOTSUP);
3820
3821 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3822 if (error)
3823 return (error);
3824
3825 if (might_destroy) {
3826 struct dsl_ds_destroyarg dsda = {0};
3827
3828 if (dmu_tx_is_syncing(tx)) {
3829 /*
3830 * If we're not prepared to remove the snapshot,
3831 * we can't allow the release to happen right now.
3832 */
3833 if (!ra->own)
3834 return (EBUSY);
3835 }
3836 dsda.ds = ds;
3837 dsda.releasing = B_TRUE;
3838 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3839 }
3840
3841 return (0);
3842 }
3843
3844 static void
3845 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3846 {
3847 struct dsl_ds_releasearg *ra = arg1;
3848 dsl_dataset_t *ds = ra->ds;
3849 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3850 objset_t *mos = dp->dp_meta_objset;
3851 uint64_t zapobj;
3852 uint64_t dsobj = ds->ds_object;
3853 uint64_t refs;
3854 int error;
3855
3856 mutex_enter(&ds->ds_lock);
3857 ds->ds_userrefs--;
3858 refs = ds->ds_userrefs;
3859 mutex_exit(&ds->ds_lock);
3860 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3861 VERIFY(error == 0 || error == ENOENT);
3862 zapobj = ds->ds_phys->ds_userrefs_obj;
3863 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3864
3865 spa_history_log_internal(LOG_DS_USER_RELEASE,
3866 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3867 ra->htag, (longlong_t)refs, dsobj);
3868
3869 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3870 DS_IS_DEFER_DESTROY(ds)) {
3871 struct dsl_ds_destroyarg dsda = {0};
3872
3873 ASSERT(ra->own);
3874 dsda.ds = ds;
3875 dsda.releasing = B_TRUE;
3876 /* We already did the destroy_check */
3877 dsl_dataset_destroy_sync(&dsda, tag, tx);
3878 }
3879 }
3880
3881 static int
3882 dsl_dataset_user_release_one(const char *dsname, void *arg)
3883 {
3884 struct dsl_ds_holdarg *ha = arg;
3885 struct dsl_ds_releasearg *ra;
3886 dsl_dataset_t *ds;
3887 int error;
3888 void *dtag = ha->dstg;
3889 char *name;
3890 boolean_t own = B_FALSE;
3891 boolean_t might_destroy;
3892
3893 /* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3894 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3895 error = dsl_dataset_hold(name, dtag, &ds);
3896 strfree(name);
3897 if (error == ENOENT && ha->recursive)
3898 return (0);
3899 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3900 if (error)
3901 return (error);
3902
3903 ha->gotone = B_TRUE;
3904
3905 ASSERT(dsl_dataset_is_snapshot(ds));
3906
3907 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3908 if (error) {
3909 dsl_dataset_rele(ds, dtag);
3910 return (error);
3911 }
3912
3913 if (might_destroy) {
3914 #ifdef _KERNEL
3915 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3916 error = zfs_unmount_snap(name, NULL);
3917 strfree(name);
3918 if (error) {
3919 dsl_dataset_rele(ds, dtag);
3920 return (error);
3921 }
3922 #endif
3923 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3924 dsl_dataset_rele(ds, dtag);
3925 return (EBUSY);
3926 } else {
3927 own = B_TRUE;
3928 dsl_dataset_make_exclusive(ds, dtag);
3929 }
3930 }
3931
3932 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3933 ra->ds = ds;
3934 ra->htag = ha->htag;
3935 ra->own = own;
3936 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3937 dsl_dataset_user_release_sync, ra, dtag, 0);
3938
3939 return (0);
3940 }
3941
3942 int
3943 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3944 boolean_t recursive)
3945 {
3946 struct dsl_ds_holdarg *ha;
3947 dsl_sync_task_t *dst;
3948 spa_t *spa;
3949 int error;
3950
3951 top:
3952 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3953
3954 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3955
3956 error = spa_open(dsname, &spa, FTAG);
3957 if (error) {
3958 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3959 return (error);
3960 }
3961
3962 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3963 ha->htag = htag;
3964 ha->snapname = snapname;
3965 ha->recursive = recursive;
3966 if (recursive) {
3967 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3968 ha, DS_FIND_CHILDREN);
3969 } else {
3970 error = dsl_dataset_user_release_one(dsname, ha);
3971 }
3972 if (error == 0)
3973 error = dsl_sync_task_group_wait(ha->dstg);
3974
3975 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3976 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3977 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3978 dsl_dataset_t *ds = ra->ds;
3979
3980 if (dst->dst_err)
3981 dsl_dataset_name(ds, ha->failed);
3982
3983 if (ra->own)
3984 dsl_dataset_disown(ds, ha->dstg);
3985 else
3986 dsl_dataset_rele(ds, ha->dstg);
3987
3988 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3989 }
3990
3991 if (error == 0 && recursive && !ha->gotone)
3992 error = ENOENT;
3993
3994 if (error && error != EBUSY)
3995 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3996
3997 dsl_sync_task_group_destroy(ha->dstg);
3998 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3999 spa_close(spa, FTAG);
4000
4001 /*
4002 * We can get EBUSY if we were racing with deferred destroy and
4003 * dsl_dataset_user_release_check() hadn't done the necessary
4004 * open context setup. We can also get EBUSY if we're racing
4005 * with destroy and that thread is the ds_owner. Either way
4006 * the busy condition should be transient, and we should retry
4007 * the release operation.
4008 */
4009 if (error == EBUSY)
4010 goto top;
4011
4012 return (error);
4013 }
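/*
 * Editor's sketch of the matching release for the hold example above
 * (hypothetical names). Note that EBUSY never escapes to the caller;
 * per the comment above, it is retried internally via the "top" label:
 *
 *     error = dsl_dataset_user_release(dsname, "snap1", "my-tag", B_TRUE);
 *
 * As with dsl_dataset_user_hold(), on failure dsname is overwritten with
 * the name of the dataset that failed (except for the retried EBUSY case).
 */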
4014
4015 /*
4016 * Called at spa_load time (with retry == B_FALSE) to release a stale
4017 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4018 */
4019 int
4020 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4021 boolean_t retry)
4022 {
4023 dsl_dataset_t *ds;
4024 char *snap;
4025 char *name;
4026 int namelen;
4027 int error;
4028
4029 do {
4030 rw_enter(&dp->dp_config_rwlock, RW_READER);
4031 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4032 rw_exit(&dp->dp_config_rwlock);
4033 if (error)
4034 return (error);
4035 namelen = dsl_dataset_namelen(ds) + 1;
4036 name = kmem_alloc(namelen, KM_SLEEP);
4037 dsl_dataset_name(ds, name);
4038 dsl_dataset_rele(ds, FTAG);
4039
4040 snap = strchr(name, '@');
4041 *snap = '\0';
4042 ++snap;
4043 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4044 kmem_free(name, namelen);
4045
4046 /*
4047 * The object can't have been destroyed because we have a hold,
4048 * but it might have been renamed, resulting in ENOENT. Retry
4049 * if we've been requested to do so.
4050 *
4051 * It would be nice if we could use the dsobj all the way
4052 * through and avoid ENOENT entirely. But we might need to
4053 * unmount the snapshot, and there's currently no way to look up
4054 * a vfsp using a ZFS object id.
4055 */
4056 } while ((error == ENOENT) && retry);
4057
4058 return (error);
4059 }
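/*
 * Editor's sketch: the onexit cleanup path would release a temporary hold
 * it registered earlier with something like (dp, dsobj and htag as
 * captured at registration time):
 *
 *     error = dsl_dataset_user_release_tmp(dp, dsobj, htag, B_TRUE);
 *
 * retry == B_TRUE loops on ENOENT because the snapshot may be renamed
 * between the name lookup and the release; spa_load passes B_FALSE,
 * presumably because one attempt on a stale hold is sufficient there.
 */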
4060
4061 int
4062 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4063 {
4064 dsl_dataset_t *ds;
4065 int err;
4066
4067 err = dsl_dataset_hold(dsname, FTAG, &ds);
4068 if (err)
4069 return (err);
4070
4071 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4072 if (ds->ds_phys->ds_userrefs_obj != 0) {
4073 zap_attribute_t *za;
4074 zap_cursor_t zc;
4075
4076 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4077 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4078 ds->ds_phys->ds_userrefs_obj);
4079 zap_cursor_retrieve(&zc, za) == 0;
4080 zap_cursor_advance(&zc)) {
4081 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4082 za->za_first_integer));
4083 }
4084 zap_cursor_fini(&zc);
4085 kmem_free(za, sizeof (zap_attribute_t));
4086 }
4087 dsl_dataset_rele(ds, FTAG);
4088 return (0);
4089 }
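/*
 * Editor's sketch of consuming the result (hypothetical caller): the
 * returned nvlist maps each hold tag to the time the hold was taken,
 * as recorded in the userrefs ZAP:
 *
 *     nvlist_t *nvl;
 *     nvpair_t *pair = NULL;
 *     uint64_t when;
 *
 *     if (dsl_dataset_get_holds("pool/fs@snap1", &nvl) == 0) {
 *         while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
 *             VERIFY(0 == nvpair_value_uint64(pair, &when));
 *             dprintf("hold <%s> since %llu\n",
 *                 nvpair_name(pair), (u_longlong_t)when);
 *         }
 *         nvlist_free(nvl);
 *     }
 */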
4090
4091 /*
4092 * Note, this function is used as the callback for dmu_objset_find(). We
4093 * always return 0 so that we will continue to find and process
4094 * inconsistent datasets, even if we encounter an error trying to
4095 * process one of them.
4096 */
4097 /* ARGSUSED */
4098 int
4099 dsl_destroy_inconsistent(const char *dsname, void *arg)
4100 {
4101 dsl_dataset_t *ds;
4102
4103 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4104 if (DS_IS_INCONSISTENT(ds))
4105 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4106 else
4107 dsl_dataset_disown(ds, FTAG);
4108 }
4109 return (0);
4110 }
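/*
 * Editor's sketch: a caller would walk every dataset under a root with
 * dmu_objset_find(), as the comment above notes ("pool" is a
 * hypothetical name):
 *
 *     (void) dmu_objset_find("pool", dsl_destroy_inconsistent,
 *         NULL, DS_FIND_CHILDREN);
 *
 * Because the callback always returns 0, the walk visits every child
 * even when destroying one inconsistent dataset fails.
 */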
4111
4112
4113 /*
4114 * Return (in *usedp) the amount of space written in new that is not
4115 * present in oldsnap. New may be a snapshot or the head. Oldsnap must be
4116 * an earlier snapshot in new's filesystem (or its origin); if it is not,
4117 * fail and return EINVAL.
4118 *
4119 * The written space is calculated from two components: First, we ignore
4120 * any freed space and compute the written space as new's used space minus
4121 * old's used space. Next, we add back the space that was freed between the
4122 * two snapshots, since that freeing reduced new's used space relative to old's.
4123 * Specifically, this is the space that was born before old->ds_creation_txg,
4124 * and freed before new (ie. on new's deadlist or a previous deadlist).
4125 *
4126 * space freed [---------------------]
4127 * snapshots ---O-------O--------O-------O------
4128 * oldsnap new
4129 */
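/*
 * Editor's worked example with made-up numbers: if oldsnap's used space
 * is 10G, new's used space is 12G, and 3G of data born before oldsnap
 * was freed between the two, then the space written is
 * (12G - 10G) + 3G = 5G: 5G of new data was written since oldsnap, even
 * though used space only grew by 2G, because 3G of old data went away.
 */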
4130 int
4131 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4132 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4133 {
4134 int err = 0;
4135 uint64_t snapobj;
4136 dsl_pool_t *dp = new->ds_dir->dd_pool;
4137
4138 *usedp = 0;
4139 *usedp += new->ds_phys->ds_used_bytes;
4140 *usedp -= oldsnap->ds_phys->ds_used_bytes;
4141
4142 *compp = 0;
4143 *compp += new->ds_phys->ds_compressed_bytes;
4144 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4145
4146 *uncompp = 0;
4147 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4148 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4149
4150 rw_enter(&dp->dp_config_rwlock, RW_READER);
4151 snapobj = new->ds_object;
4152 while (snapobj != oldsnap->ds_object) {
4153 dsl_dataset_t *snap;
4154 uint64_t used, comp, uncomp;
4155
4156 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4157 if (err != 0)
4158 break;
4159
4160 if (snap->ds_phys->ds_prev_snap_txg ==
4161 oldsnap->ds_phys->ds_creation_txg) {
4162 /*
4163 * The blocks in the deadlist cannot be born after
4164 * ds_prev_snap_txg, so get the whole deadlist space,
4165 * which is more efficient (especially for old-format
4166 * deadlists). Unfortunately the deadlist code
4167 * doesn't have enough information to make this
4168 * optimization itself.
4169 */
4170 dsl_deadlist_space(&snap->ds_deadlist,
4171 &used, &comp, &uncomp);
4172 } else {
4173 dsl_deadlist_space_range(&snap->ds_deadlist,
4174 0, oldsnap->ds_phys->ds_creation_txg,
4175 &used, &comp, &uncomp);
4176 }
4177 *usedp += used;
4178 *compp += comp;
4179 *uncompp += uncomp;
4180
4181 /*
4182 * If we get to the beginning of the chain of snapshots
4183 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4184 * was not a snapshot of/before new.
4185 */
4186 snapobj = snap->ds_phys->ds_prev_snap_obj;
4187 dsl_dataset_rele(snap, FTAG);
4188 if (snapobj == 0) {
4189 err = EINVAL;
4190 break;
4191 }
4192
4193 }
4194 rw_exit(&dp->dp_config_rwlock);
4195 return (err);
4196 }
4197
4198 /*
4199 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4200 * lastsnap, and all snapshots in between are deleted.
4201 *
4202 * blocks that would be freed [---------------------------]
4203 * snapshots ---O-------O--------O-------O--------O
4204 * firstsnap lastsnap
4205 *
4206 * This is the set of blocks that were born after the snap before firstsnap
4207 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4208 * last snap (ie. on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4209 * We calculate this by iterating over the relevant deadlists (from the snap
4210 * after lastsnap, backward to the snap after firstsnap), summing up the
4211 * space on the deadlist that was born after the snap before firstsnap.
4212 */
4213 int
4214 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4215 dsl_dataset_t *lastsnap,
4216 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4217 {
4218 int err = 0;
4219 uint64_t snapobj;
4220 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4221
4222 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4223 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4224
4225 /*
4226 * Check that the snapshots are in the same dsl_dir, and firstsnap
4227 * is before lastsnap.
4228 */
4229 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4230 firstsnap->ds_phys->ds_creation_txg >
4231 lastsnap->ds_phys->ds_creation_txg)
4232 return (EINVAL);
4233
4234 *usedp = *compp = *uncompp = 0;
4235
4236 rw_enter(&dp->dp_config_rwlock, RW_READER);
4237 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4238 while (snapobj != firstsnap->ds_object) {
4239 dsl_dataset_t *ds;
4240 uint64_t used, comp, uncomp;
4241
4242 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4243 if (err != 0)
4244 break;
4245
4246 dsl_deadlist_space_range(&ds->ds_deadlist,
4247 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4248 &used, &comp, &uncomp);
4249 *usedp += used;
4250 *compp += comp;
4251 *uncompp += uncomp;
4252
4253 snapobj = ds->ds_phys->ds_prev_snap_obj;
4254 ASSERT3U(snapobj, !=, 0);
4255 dsl_dataset_rele(ds, FTAG);
4256 }
4257 rw_exit(&dp->dp_config_rwlock);
4258 return (err);
4259 }
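/*
 * Editor's sketch of a caller (hypothetical names, error handling
 * elided): hold the two endpoint snapshots, then ask how much space
 * deleting firstsnap..lastsnap would reclaim:
 *
 *     dsl_dataset_t *first, *last;
 *     uint64_t used, comp, uncomp;
 *
 *     VERIFY(0 == dsl_dataset_hold("pool/fs@a", FTAG, &first));
 *     VERIFY(0 == dsl_dataset_hold("pool/fs@d", FTAG, &last));
 *     error = dsl_dataset_space_wouldfree(first, last,
 *         &used, &comp, &uncomp);
 *     dsl_dataset_rele(last, FTAG);
 *     dsl_dataset_rele(first, FTAG);
 *
 * EINVAL is returned if the snapshots are not in the same filesystem or
 * firstsnap is not the older of the two.
 */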
4260
4261 #if defined(_KERNEL) && defined(HAVE_SPL)
4262 EXPORT_SYMBOL(dmu_snapshots_destroy_nvl);
4263 EXPORT_SYMBOL(dsl_dataset_hold);
4264 EXPORT_SYMBOL(dsl_dataset_hold_obj);
4265 EXPORT_SYMBOL(dsl_dataset_own);
4266 EXPORT_SYMBOL(dsl_dataset_own_obj);
4267 EXPORT_SYMBOL(dsl_dataset_name);
4268 EXPORT_SYMBOL(dsl_dataset_rele);
4269 EXPORT_SYMBOL(dsl_dataset_disown);
4270 EXPORT_SYMBOL(dsl_dataset_drop_ref);
4271 EXPORT_SYMBOL(dsl_dataset_tryown);
4272 EXPORT_SYMBOL(dsl_dataset_make_exclusive);
4273 EXPORT_SYMBOL(dsl_dataset_create_sync);
4274 EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
4275 EXPORT_SYMBOL(dsl_dataset_destroy);
4276 EXPORT_SYMBOL(dsl_dataset_destroy_check);
4277 EXPORT_SYMBOL(dsl_dataset_destroy_sync);
4278 EXPORT_SYMBOL(dsl_dataset_snapshot_check);
4279 EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
4280 EXPORT_SYMBOL(dsl_dataset_rename);
4281 EXPORT_SYMBOL(dsl_dataset_promote);
4282 EXPORT_SYMBOL(dsl_dataset_clone_swap);
4283 EXPORT_SYMBOL(dsl_dataset_user_hold);
4284 EXPORT_SYMBOL(dsl_dataset_user_release);
4285 EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
4286 EXPORT_SYMBOL(dsl_dataset_get_holds);
4287 EXPORT_SYMBOL(dsl_dataset_get_blkptr);
4288 EXPORT_SYMBOL(dsl_dataset_set_blkptr);
4289 EXPORT_SYMBOL(dsl_dataset_get_spa);
4290 EXPORT_SYMBOL(dsl_dataset_modified_since_lastsnap);
4291 EXPORT_SYMBOL(dsl_dataset_space_written);
4292 EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
4293 EXPORT_SYMBOL(dsl_dataset_sync);
4294 EXPORT_SYMBOL(dsl_dataset_block_born);
4295 EXPORT_SYMBOL(dsl_dataset_block_kill);
4296 EXPORT_SYMBOL(dsl_dataset_block_freeable);
4297 EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
4298 EXPORT_SYMBOL(dsl_dataset_dirty);
4299 EXPORT_SYMBOL(dsl_dataset_stats);
4300 EXPORT_SYMBOL(dsl_dataset_fast_stat);
4301 EXPORT_SYMBOL(dsl_dataset_space);
4302 EXPORT_SYMBOL(dsl_dataset_fsid_guid);
4303 EXPORT_SYMBOL(dsl_dsobj_to_dsname);
4304 EXPORT_SYMBOL(dsl_dataset_check_quota);
4305 EXPORT_SYMBOL(dsl_dataset_set_quota);
4306 EXPORT_SYMBOL(dsl_dataset_set_quota_sync);
4307 EXPORT_SYMBOL(dsl_dataset_set_reservation);
4308 EXPORT_SYMBOL(dsl_destroy_inconsistent);
4309 #endif