module/zfs/dsl_dataset.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 by Delphix. All rights reserved.
24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 */
26
27 #include <sys/dmu_objset.h>
28 #include <sys/dsl_dataset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_prop.h>
31 #include <sys/dsl_synctask.h>
32 #include <sys/dmu_traverse.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/arc.h>
36 #include <sys/zio.h>
37 #include <sys/zap.h>
38 #include <sys/unique.h>
39 #include <sys/zfs_context.h>
40 #include <sys/zfs_ioctl.h>
41 #include <sys/spa.h>
42 #include <sys/zfs_znode.h>
43 #include <sys/zfs_onexit.h>
44 #include <sys/zvol.h>
45 #include <sys/dsl_scan.h>
46 #include <sys/dsl_deadlist.h>
47
48 static char *dsl_reaper = "the grim reaper";
49
50 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
51 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
52 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
53
54 #define SWITCH64(x, y) \
55 { \
56 uint64_t __tmp = (x); \
57 (x) = (y); \
58 (y) = __tmp; \
59 }
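/*
 * SWITCH64 swaps two uint64_t lvalues in place; process_old_deadlist()
 * below uses it to exchange deadlist object numbers. A minimal sketch
 * (hypothetical values):
 *
 *	uint64_t a = 1, b = 2;
 *	SWITCH64(a, b);
 *	ASSERT3U(a, ==, 2);
 *	ASSERT3U(b, ==, 1);
 */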
60
61 #define DS_REF_MAX (1ULL << 62)
62
63 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
64
65 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
66
67
68 /*
69  * Figure out how much of this delta should be propagated to the dsl_dir
70 * layer. If there's a refreservation, that space has already been
71 * partially accounted for in our ancestors.
72 */
73 static int64_t
74 parent_delta(dsl_dataset_t *ds, int64_t delta)
75 {
76 uint64_t old_bytes, new_bytes;
77
78 if (ds->ds_reserved == 0)
79 return (delta);
80
81 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
82 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
83
84 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
85 return (new_bytes - old_bytes);
86 }
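/*
 * Worked example (hypothetical numbers): with ds_reserved = 100 and
 * ds_unique_bytes = 80, a delta of +30 gives
 *
 *	old_bytes = MAX(80, 100)  = 100
 *	new_bytes = MAX(110, 100) = 110
 *
 * so only 10 of the 30 bytes propagate to the dsl_dir layer; the
 * first 20 were already accounted for by the refreservation.
 */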
87
88 void
89 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
90 {
91 int used, compressed, uncompressed;
92 int64_t delta;
93
94 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
95 compressed = BP_GET_PSIZE(bp);
96 uncompressed = BP_GET_UCSIZE(bp);
97
98 dprintf_bp(bp, "ds=%p", ds);
99
100 ASSERT(dmu_tx_is_syncing(tx));
101 /* It could have been compressed away to nothing */
102 if (BP_IS_HOLE(bp))
103 return;
104 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
105 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
106 if (ds == NULL) {
107 /*
108 * Account for the meta-objset space in its placeholder
109 * dsl_dir.
110 */
111 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
112 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
113 used, compressed, uncompressed, tx);
114 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
115 return;
116 }
117 dmu_buf_will_dirty(ds->ds_dbuf, tx);
118
119 mutex_enter(&ds->ds_dir->dd_lock);
120 mutex_enter(&ds->ds_lock);
121 delta = parent_delta(ds, used);
122 ds->ds_phys->ds_used_bytes += used;
123 ds->ds_phys->ds_compressed_bytes += compressed;
124 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
125 ds->ds_phys->ds_unique_bytes += used;
126 mutex_exit(&ds->ds_lock);
127 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
128 compressed, uncompressed, tx);
129 dsl_dir_transfer_space(ds->ds_dir, used - delta,
130 DD_USED_REFRSRV, DD_USED_HEAD, tx);
131 mutex_exit(&ds->ds_dir->dd_lock);
132 }
133
134 int
135 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
136 boolean_t async)
137 {
138 int used, compressed, uncompressed;
139
140 if (BP_IS_HOLE(bp))
141 return (0);
142
143 ASSERT(dmu_tx_is_syncing(tx));
144 ASSERT(bp->blk_birth <= tx->tx_txg);
145
146 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
147 compressed = BP_GET_PSIZE(bp);
148 uncompressed = BP_GET_UCSIZE(bp);
149
150 ASSERT(used > 0);
151 if (ds == NULL) {
152 /*
153 * Account for the meta-objset space in its placeholder
154 * dataset.
155 */
156 dsl_free(tx->tx_pool, tx->tx_txg, bp);
157
158 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
159 -used, -compressed, -uncompressed, tx);
160 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
161 return (used);
162 }
163 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
164
165 ASSERT(!dsl_dataset_is_snapshot(ds));
166 dmu_buf_will_dirty(ds->ds_dbuf, tx);
167
168 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
169 int64_t delta;
170
171 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
172 dsl_free(tx->tx_pool, tx->tx_txg, bp);
173
174 mutex_enter(&ds->ds_dir->dd_lock);
175 mutex_enter(&ds->ds_lock);
176 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
177 !DS_UNIQUE_IS_ACCURATE(ds));
178 delta = parent_delta(ds, -used);
179 ds->ds_phys->ds_unique_bytes -= used;
180 mutex_exit(&ds->ds_lock);
181 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
182 delta, -compressed, -uncompressed, tx);
183 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
184 DD_USED_REFRSRV, DD_USED_HEAD, tx);
185 mutex_exit(&ds->ds_dir->dd_lock);
186 } else {
187 dprintf_bp(bp, "putting on dead list: %s", "");
188 if (async) {
189 /*
190 * We are here as part of zio's write done callback,
191 * which means we're a zio interrupt thread. We can't
192 * call dsl_deadlist_insert() now because it may block
193 * waiting for I/O. Instead, put bp on the deferred
194 * queue and let dsl_pool_sync() finish the job.
195 */
196 bplist_append(&ds->ds_pending_deadlist, bp);
197 } else {
198 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
199 }
200 ASSERT3U(ds->ds_prev->ds_object, ==,
201 ds->ds_phys->ds_prev_snap_obj);
202 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
203 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
204 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
205 ds->ds_object && bp->blk_birth >
206 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
207 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
208 mutex_enter(&ds->ds_prev->ds_lock);
209 ds->ds_prev->ds_phys->ds_unique_bytes += used;
210 mutex_exit(&ds->ds_prev->ds_lock);
211 }
212 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
213 dsl_dir_transfer_space(ds->ds_dir, used,
214 DD_USED_HEAD, DD_USED_SNAP, tx);
215 }
216 }
217 mutex_enter(&ds->ds_lock);
218 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
219 ds->ds_phys->ds_used_bytes -= used;
220 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
221 ds->ds_phys->ds_compressed_bytes -= compressed;
222 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
223 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
224 mutex_exit(&ds->ds_lock);
225
226 return (used);
227 }
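/*
 * Sketch of the async path described above (illustrative; exact
 * callers vary): a zio done callback running in interrupt context
 * frees a block with async = B_TRUE, which only appends bp to
 * ds_pending_deadlist. Later, in syncing context, dsl_pool_sync()
 * drains that bplist into the real deadlist, where
 * dsl_deadlist_insert() is allowed to block on I/O:
 *
 *	(void) dsl_dataset_block_kill(ds, bp, tx, B_TRUE);
 *
 * and then, once per txg, roughly:
 *
 *	bplist_iterate(&ds->ds_pending_deadlist,
 *	    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
 */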
228
229 uint64_t
230 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
231 {
232 uint64_t trysnap = 0;
233
234 if (ds == NULL)
235 return (0);
236 /*
237 * The snapshot creation could fail, but that would cause an
238 * incorrect FALSE return, which would only result in an
239 * overestimation of the amount of space that an operation would
240 * consume, which is OK.
241 *
242 * There's also a small window where we could miss a pending
243 * snapshot, because we could set the sync task in the quiescing
244 * phase. So this should only be used as a guess.
245 */
246 if (ds->ds_trysnap_txg >
247 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
248 trysnap = ds->ds_trysnap_txg;
249 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
250 }
251
252 boolean_t
253 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
254 uint64_t blk_birth)
255 {
256 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
257 return (B_FALSE);
258
259 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
260
261 return (B_TRUE);
262 }
263
264 /* ARGSUSED */
265 static void
266 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
267 {
268 dsl_dataset_t *ds = dsv;
269
270 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
271
272 unique_remove(ds->ds_fsid_guid);
273
274 if (ds->ds_objset != NULL)
275 dmu_objset_evict(ds->ds_objset);
276
277 if (ds->ds_prev) {
278 dsl_dataset_drop_ref(ds->ds_prev, ds);
279 ds->ds_prev = NULL;
280 }
281
282 bplist_destroy(&ds->ds_pending_deadlist);
283 if (db != NULL) {
284 dsl_deadlist_close(&ds->ds_deadlist);
285 } else {
286 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
287 ASSERT(!ds->ds_deadlist.dl_oldfmt);
288 }
289 if (ds->ds_dir)
290 dsl_dir_close(ds->ds_dir, ds);
291
292 ASSERT(!list_link_active(&ds->ds_synced_link));
293
294 mutex_destroy(&ds->ds_lock);
295 mutex_destroy(&ds->ds_recvlock);
296 mutex_destroy(&ds->ds_opening_lock);
297 rw_destroy(&ds->ds_rwlock);
298 cv_destroy(&ds->ds_exclusive_cv);
299
300 kmem_free(ds, sizeof (dsl_dataset_t));
301 }
302
303 static int
304 dsl_dataset_get_snapname(dsl_dataset_t *ds)
305 {
306 dsl_dataset_phys_t *headphys;
307 int err;
308 dmu_buf_t *headdbuf;
309 dsl_pool_t *dp = ds->ds_dir->dd_pool;
310 objset_t *mos = dp->dp_meta_objset;
311
312 if (ds->ds_snapname[0])
313 return (0);
314 if (ds->ds_phys->ds_next_snap_obj == 0)
315 return (0);
316
317 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
318 FTAG, &headdbuf);
319 if (err)
320 return (err);
321 headphys = headdbuf->db_data;
322 err = zap_value_search(dp->dp_meta_objset,
323 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
324 dmu_buf_rele(headdbuf, FTAG);
325 return (err);
326 }
327
328 static int
329 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
330 {
331 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
332 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
333 matchtype_t mt;
334 int err;
335
336 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
337 mt = MT_FIRST;
338 else
339 mt = MT_EXACT;
340
341 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
342 value, mt, NULL, 0, NULL);
343 if (err == ENOTSUP && mt == MT_FIRST)
344 err = zap_lookup(mos, snapobj, name, 8, 1, value);
345 return (err);
346 }
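/*
 * The lookup above is normalization-aware: on a case-insensitive
 * dataset (DS_FLAG_CI_DATASET) it asks the ZAP for the first
 * normalized match (MT_FIRST); if the underlying ZAP cannot do
 * normalized matching it returns ENOTSUP and we retry with a plain
 * exact-match zap_lookup(). Caller sketch (hypothetical names):
 *
 *	uint64_t snapobj;
 *	int err = dsl_dataset_snap_lookup(ds, "Monday", &snapobj);
 *	// on a CI dataset this would also match a snapshot "monday"
 */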
347
348 static int
349 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
350 {
351 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
352 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
353 matchtype_t mt;
354 int err;
355
356 dsl_dir_snap_cmtime_update(ds->ds_dir);
357
358 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
359 mt = MT_FIRST;
360 else
361 mt = MT_EXACT;
362
363 err = zap_remove_norm(mos, snapobj, name, mt, tx);
364 if (err == ENOTSUP && mt == MT_FIRST)
365 err = zap_remove(mos, snapobj, name, tx);
366 return (err);
367 }
368
369 static int
370 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
371 dsl_dataset_t **dsp)
372 {
373 objset_t *mos = dp->dp_meta_objset;
374 dmu_buf_t *dbuf;
375 dsl_dataset_t *ds;
376 int err;
377 dmu_object_info_t doi;
378
379 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
380 dsl_pool_sync_context(dp));
381
382 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
383 if (err)
384 return (err);
385
386 /* Make sure dsobj has the correct object type. */
387 dmu_object_info_from_db(dbuf, &doi);
388 if (doi.doi_type != DMU_OT_DSL_DATASET) {
389 dmu_buf_rele(dbuf, tag);
389 return (EINVAL);
389 }
390
391 ds = dmu_buf_get_user(dbuf);
392 if (ds == NULL) {
393 dsl_dataset_t *winner = NULL;
394
395 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_PUSHPAGE);
396 ds->ds_dbuf = dbuf;
397 ds->ds_object = dsobj;
398 ds->ds_phys = dbuf->db_data;
399 list_link_init(&ds->ds_synced_link);
400
401 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
402 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
403 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
404 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
405
406 rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
407 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
408
409 bplist_create(&ds->ds_pending_deadlist);
410 dsl_deadlist_open(&ds->ds_deadlist,
411 mos, ds->ds_phys->ds_deadlist_obj);
412
413 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
414 offsetof(dmu_sendarg_t, dsa_link));
415
416 if (err == 0) {
417 err = dsl_dir_open_obj(dp,
418 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
419 }
420 if (err) {
421 mutex_destroy(&ds->ds_lock);
422 mutex_destroy(&ds->ds_recvlock);
423 mutex_destroy(&ds->ds_opening_lock);
424 rw_destroy(&ds->ds_rwlock);
425 cv_destroy(&ds->ds_exclusive_cv);
426 bplist_destroy(&ds->ds_pending_deadlist);
427 dsl_deadlist_close(&ds->ds_deadlist);
428 kmem_free(ds, sizeof (dsl_dataset_t));
429 dmu_buf_rele(dbuf, tag);
430 return (err);
431 }
432
433 if (!dsl_dataset_is_snapshot(ds)) {
434 ds->ds_snapname[0] = '\0';
435 if (ds->ds_phys->ds_prev_snap_obj) {
436 err = dsl_dataset_get_ref(dp,
437 ds->ds_phys->ds_prev_snap_obj,
438 ds, &ds->ds_prev);
439 }
440 } else {
441 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
442 err = dsl_dataset_get_snapname(ds);
443 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
444 err = zap_count(
445 ds->ds_dir->dd_pool->dp_meta_objset,
446 ds->ds_phys->ds_userrefs_obj,
447 &ds->ds_userrefs);
448 }
449 }
450
451 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
452 /*
453 * In sync context, we're called with either no lock
454 * or with the write lock. If we're not syncing,
455 * we're always called with the read lock held.
456 */
457 boolean_t need_lock =
458 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
459 dsl_pool_sync_context(dp);
460
461 if (need_lock)
462 rw_enter(&dp->dp_config_rwlock, RW_READER);
463
464 err = dsl_prop_get_ds(ds,
465 "refreservation", sizeof (uint64_t), 1,
466 &ds->ds_reserved, NULL);
467 if (err == 0) {
468 err = dsl_prop_get_ds(ds,
469 "refquota", sizeof (uint64_t), 1,
470 &ds->ds_quota, NULL);
471 }
472
473 if (need_lock)
474 rw_exit(&dp->dp_config_rwlock);
475 } else {
476 ds->ds_reserved = ds->ds_quota = 0;
477 }
478
479 if (err == 0) {
480 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
481 dsl_dataset_evict);
482 }
483 if (err || winner) {
484 bplist_destroy(&ds->ds_pending_deadlist);
485 dsl_deadlist_close(&ds->ds_deadlist);
486 if (ds->ds_prev)
487 dsl_dataset_drop_ref(ds->ds_prev, ds);
488 dsl_dir_close(ds->ds_dir, ds);
489 mutex_destroy(&ds->ds_lock);
490 mutex_destroy(&ds->ds_recvlock);
491 mutex_destroy(&ds->ds_opening_lock);
492 rw_destroy(&ds->ds_rwlock);
493 cv_destroy(&ds->ds_exclusive_cv);
494 kmem_free(ds, sizeof (dsl_dataset_t));
495 if (err) {
496 dmu_buf_rele(dbuf, tag);
497 return (err);
498 }
499 ds = winner;
500 } else {
501 ds->ds_fsid_guid =
502 unique_insert(ds->ds_phys->ds_fsid_guid);
503 }
504 }
505 ASSERT3P(ds->ds_dbuf, ==, dbuf);
506 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
507 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
508 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
509 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
510 mutex_enter(&ds->ds_lock);
511 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
512 mutex_exit(&ds->ds_lock);
513 dmu_buf_rele(ds->ds_dbuf, tag);
514 return (ENOENT);
515 }
516 mutex_exit(&ds->ds_lock);
517 *dsp = ds;
518 return (0);
519 }
520
521 static int
522 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
523 {
524 dsl_pool_t *dp = ds->ds_dir->dd_pool;
525
526 /*
527 * In syncing context we don't want to take the rwlock: there
528 * may be an existing writer waiting for sync phase to
529 * finish. We don't need to worry about such writers, since
530 * sync phase is single-threaded, so the writer can't be
531 * doing anything while we are active.
532 */
533 if (dsl_pool_sync_context(dp)) {
534 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
535 return (0);
536 }
537
538 /*
539 * Normal users will hold the ds_rwlock as a READER until they
540 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
541 * drop their READER lock after they set the ds_owner field.
542 *
543 * If the dataset is being destroyed, the destroy thread will
544 * obtain a WRITER lock for exclusive access after it's done its
545 * open-context work and then change the ds_owner to
546 * dsl_reaper once destruction is assured. So threads
547 * may block here temporarily, until the "destructibility" of
548 * the dataset is determined.
549 */
550 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
551 mutex_enter(&ds->ds_lock);
552 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
553 rw_exit(&dp->dp_config_rwlock);
554 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
555 if (DSL_DATASET_IS_DESTROYED(ds)) {
556 mutex_exit(&ds->ds_lock);
557 dsl_dataset_drop_ref(ds, tag);
558 rw_enter(&dp->dp_config_rwlock, RW_READER);
559 return (ENOENT);
560 }
561 /*
562 * The dp_config_rwlock lives above the ds_lock. And
563 * we need to check DSL_DATASET_IS_DESTROYED() while
564 * holding the ds_lock, so we have to drop and reacquire
565 * the ds_lock here.
566 */
567 mutex_exit(&ds->ds_lock);
568 rw_enter(&dp->dp_config_rwlock, RW_READER);
569 mutex_enter(&ds->ds_lock);
570 }
571 mutex_exit(&ds->ds_lock);
572 return (0);
573 }
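/*
 * Sketch of the holder/owner protocol described above (illustrative;
 * error handling elided). Ordinary readers keep ds_rwlock as READER
 * for the life of the hold; owners trade that READER hold for the
 * ds_owner field and use disown to give it back:
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_hold("tank/fs", FTAG, &ds) == 0) {
 *		// read-only use of ds
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *	if (dsl_dataset_own("tank/fs", B_FALSE, FTAG, &ds) == 0) {
 *		// exclusive-owner use of ds
 *		dsl_dataset_disown(ds, FTAG);
 *	}
 */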
574
575 int
576 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
577 dsl_dataset_t **dsp)
578 {
579 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
580
581 if (err)
582 return (err);
583 return (dsl_dataset_hold_ref(*dsp, tag));
584 }
585
586 int
587 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
588 void *tag, dsl_dataset_t **dsp)
589 {
590 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
591 if (err)
592 return (err);
593 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
594 dsl_dataset_rele(*dsp, tag);
595 *dsp = NULL;
596 return (EBUSY);
597 }
598 return (0);
599 }
600
601 int
602 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
603 {
604 dsl_dir_t *dd;
605 dsl_pool_t *dp;
606 const char *snapname;
607 uint64_t obj;
608 int err = 0;
609
610 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
611 if (err)
612 return (err);
613
614 dp = dd->dd_pool;
615 obj = dd->dd_phys->dd_head_dataset_obj;
616 rw_enter(&dp->dp_config_rwlock, RW_READER);
617 if (obj)
618 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
619 else
620 err = ENOENT;
621 if (err)
622 goto out;
623
624 err = dsl_dataset_hold_ref(*dsp, tag);
625
626 /* we may be looking for a snapshot */
627 if (err == 0 && snapname != NULL) {
628 dsl_dataset_t *ds = NULL;
629
630 if (*snapname++ != '@') {
631 dsl_dataset_rele(*dsp, tag);
632 err = ENOENT;
633 goto out;
634 }
635
636 dprintf("looking for snapshot '%s'\n", snapname);
637 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
638 if (err == 0)
639 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
640 dsl_dataset_rele(*dsp, tag);
641
642 ASSERT3U((err == 0), ==, (ds != NULL));
643
644 if (ds) {
645 mutex_enter(&ds->ds_lock);
646 if (ds->ds_snapname[0] == 0)
647 (void) strlcpy(ds->ds_snapname, snapname,
648 sizeof (ds->ds_snapname));
649 mutex_exit(&ds->ds_lock);
650 err = dsl_dataset_hold_ref(ds, tag);
651 *dsp = err ? NULL : ds;
652 }
653 }
654 out:
655 rw_exit(&dp->dp_config_rwlock);
656 dsl_dir_close(dd, FTAG);
657 return (err);
658 }
659
660 int
661 dsl_dataset_own(const char *name, boolean_t inconsistentok,
662 void *tag, dsl_dataset_t **dsp)
663 {
664 int err = dsl_dataset_hold(name, tag, dsp);
665 if (err)
666 return (err);
667 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
668 dsl_dataset_rele(*dsp, tag);
669 return (EBUSY);
670 }
671 return (0);
672 }
673
674 void
675 dsl_dataset_name(dsl_dataset_t *ds, char *name)
676 {
677 if (ds == NULL) {
678 (void) strcpy(name, "mos");
679 } else {
680 dsl_dir_name(ds->ds_dir, name);
681 VERIFY(0 == dsl_dataset_get_snapname(ds));
682 if (ds->ds_snapname[0]) {
683 (void) strcat(name, "@");
684 /*
685 * We use a "recursive" mutex so that we
686 * can call dprintf_ds() with ds_lock held.
687 */
688 if (!MUTEX_HELD(&ds->ds_lock)) {
689 mutex_enter(&ds->ds_lock);
690 (void) strcat(name, ds->ds_snapname);
691 mutex_exit(&ds->ds_lock);
692 } else {
693 (void) strcat(name, ds->ds_snapname);
694 }
695 }
696 }
697 }
698
699 static int
700 dsl_dataset_namelen(dsl_dataset_t *ds)
701 {
702 int result;
703
704 if (ds == NULL) {
705 result = 3; /* "mos" */
706 } else {
707 result = dsl_dir_namelen(ds->ds_dir);
708 VERIFY(0 == dsl_dataset_get_snapname(ds));
709 if (ds->ds_snapname[0]) {
710 ++result; /* adding one for the @-sign */
711 if (!MUTEX_HELD(&ds->ds_lock)) {
712 mutex_enter(&ds->ds_lock);
713 result += strlen(ds->ds_snapname);
714 mutex_exit(&ds->ds_lock);
715 } else {
716 result += strlen(ds->ds_snapname);
717 }
718 }
719 }
720
721 return (result);
722 }
723
724 void
725 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
726 {
727 dmu_buf_rele(ds->ds_dbuf, tag);
728 }
729
730 void
731 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
732 {
733 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
734 rw_exit(&ds->ds_rwlock);
735 }
736 dsl_dataset_drop_ref(ds, tag);
737 }
738
739 void
740 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
741 {
742 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
743 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
744
745 mutex_enter(&ds->ds_lock);
746 ds->ds_owner = NULL;
747 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
748 rw_exit(&ds->ds_rwlock);
749 cv_broadcast(&ds->ds_exclusive_cv);
750 }
751 mutex_exit(&ds->ds_lock);
752 if (ds->ds_dbuf)
753 dsl_dataset_drop_ref(ds, tag);
754 else
755 dsl_dataset_evict(NULL, ds);
756 }
757
758 boolean_t
759 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
760 {
761 boolean_t gotit = FALSE;
762
763 mutex_enter(&ds->ds_lock);
764 if (ds->ds_owner == NULL &&
765 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
766 ds->ds_owner = tag;
767 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
768 rw_exit(&ds->ds_rwlock);
769 gotit = TRUE;
770 }
771 mutex_exit(&ds->ds_lock);
772 return (gotit);
773 }
774
775 void
776 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
777 {
778 ASSERT3P(owner, ==, ds->ds_owner);
779 if (!RW_WRITE_HELD(&ds->ds_rwlock))
780 rw_enter(&ds->ds_rwlock, RW_WRITER);
781 }
782
783 uint64_t
784 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
785 uint64_t flags, dmu_tx_t *tx)
786 {
787 dsl_pool_t *dp = dd->dd_pool;
788 dmu_buf_t *dbuf;
789 dsl_dataset_phys_t *dsphys;
790 uint64_t dsobj;
791 objset_t *mos = dp->dp_meta_objset;
792
793 if (origin == NULL)
794 origin = dp->dp_origin_snap;
795
796 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
797 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
798 ASSERT(dmu_tx_is_syncing(tx));
799 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
800
801 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
802 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
803 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
804 dmu_buf_will_dirty(dbuf, tx);
805 dsphys = dbuf->db_data;
806 bzero(dsphys, sizeof (dsl_dataset_phys_t));
807 dsphys->ds_dir_obj = dd->dd_object;
808 dsphys->ds_flags = flags;
809 dsphys->ds_fsid_guid = unique_create();
810 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
811 sizeof (dsphys->ds_guid));
812 dsphys->ds_snapnames_zapobj =
813 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
814 DMU_OT_NONE, 0, tx);
815 dsphys->ds_creation_time = gethrestime_sec();
816 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
817
818 if (origin == NULL) {
819 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
820 } else {
821 dsl_dataset_t *ohds;
822
823 dsphys->ds_prev_snap_obj = origin->ds_object;
824 dsphys->ds_prev_snap_txg =
825 origin->ds_phys->ds_creation_txg;
826 dsphys->ds_used_bytes =
827 origin->ds_phys->ds_used_bytes;
828 dsphys->ds_compressed_bytes =
829 origin->ds_phys->ds_compressed_bytes;
830 dsphys->ds_uncompressed_bytes =
831 origin->ds_phys->ds_uncompressed_bytes;
832 dsphys->ds_bp = origin->ds_phys->ds_bp;
833 dsphys->ds_flags |= origin->ds_phys->ds_flags;
834
835 dmu_buf_will_dirty(origin->ds_dbuf, tx);
836 origin->ds_phys->ds_num_children++;
837
838 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
839 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
840 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
841 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
842 dsl_dataset_rele(ohds, FTAG);
843
844 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
845 if (origin->ds_phys->ds_next_clones_obj == 0) {
846 origin->ds_phys->ds_next_clones_obj =
847 zap_create(mos,
848 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
849 }
850 VERIFY(0 == zap_add_int(mos,
851 origin->ds_phys->ds_next_clones_obj,
852 dsobj, tx));
853 }
854
855 dmu_buf_will_dirty(dd->dd_dbuf, tx);
856 dd->dd_phys->dd_origin_obj = origin->ds_object;
857 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
858 if (origin->ds_dir->dd_phys->dd_clones == 0) {
859 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
860 origin->ds_dir->dd_phys->dd_clones =
861 zap_create(mos,
862 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
863 }
864 VERIFY3U(0, ==, zap_add_int(mos,
865 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
866 }
867 }
868
869 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
870 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
871
872 dmu_buf_rele(dbuf, FTAG);
873
874 dmu_buf_will_dirty(dd->dd_dbuf, tx);
875 dd->dd_phys->dd_head_dataset_obj = dsobj;
876
877 return (dsobj);
878 }
879
880 uint64_t
881 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
882 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
883 {
884 dsl_pool_t *dp = pdd->dd_pool;
885 uint64_t dsobj, ddobj;
886 dsl_dir_t *dd;
887
888 ASSERT(lastname[0] != '@');
889
890 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
891 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
892
893 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
894
895 dsl_deleg_set_create_perms(dd, tx, cr);
896
897 dsl_dir_close(dd, FTAG);
898
899 /*
900 * If we are creating a clone, make sure we zero out any stale
901 * data from the origin snapshot's zil header.
902 */
903 if (origin != NULL) {
904 dsl_dataset_t *ds;
905 objset_t *os;
906
907 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
908 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
909 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
910 dsl_dataset_dirty(ds, tx);
911 dsl_dataset_rele(ds, FTAG);
912 }
913
914 return (dsobj);
915 }
916
917 /*
918 * The snapshots must all be in the same pool.
919 */
920 int
921 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
922 {
923 int err;
924 dsl_sync_task_t *dst;
925 spa_t *spa;
926 nvpair_t *pair;
927 dsl_sync_task_group_t *dstg;
928
929 pair = nvlist_next_nvpair(snaps, NULL);
930 if (pair == NULL)
931 return (0);
932
933 err = spa_open(nvpair_name(pair), &spa, FTAG);
934 if (err)
935 return (err);
936 dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
937
938 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
939 pair = nvlist_next_nvpair(snaps, pair)) {
940 dsl_dataset_t *ds;
941 int err;
942
943 err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
944 if (err == 0) {
945 struct dsl_ds_destroyarg *dsda;
946
947 dsl_dataset_make_exclusive(ds, dstg);
948 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
949 KM_SLEEP);
950 dsda->ds = ds;
951 dsda->defer = defer;
952 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
953 dsl_dataset_destroy_sync, dsda, dstg, 0);
954 } else if (err == ENOENT) {
955 err = 0;
956 } else {
957 (void) strcpy(failed, nvpair_name(pair));
958 break;
959 }
960 }
961
962 if (err == 0)
963 err = dsl_sync_task_group_wait(dstg);
964
965 for (dst = list_head(&dstg->dstg_tasks); dst;
966 dst = list_next(&dstg->dstg_tasks, dst)) {
967 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
968 dsl_dataset_t *ds = dsda->ds;
969
970 /*
971 * Return the file system name that triggered the error
972 */
973 if (dst->dst_err) {
974 dsl_dataset_name(ds, failed);
975 }
976 ASSERT3P(dsda->rm_origin, ==, NULL);
977 dsl_dataset_disown(ds, dstg);
978 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
979 }
980
981 dsl_sync_task_group_destroy(dstg);
982 spa_close(spa, FTAG);
983 return (err);
984
985 }
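/*
 * Caller sketch (hypothetical snapshot names; error handling elided).
 * Each nvlist key is a full snapshot name, all in one pool as noted
 * above; "failed" must be a buffer large enough for a dataset name,
 * and receives the name that triggered any error:
 *
 *	nvlist_t *snaps;
 *	char failed[MAXNAMELEN];
 *	VERIFY(0 == nvlist_alloc(&snaps, NV_UNIQUE_NAME, KM_SLEEP));
 *	VERIFY(0 == nvlist_add_boolean(snaps, "tank/fs@snap1"));
 *	VERIFY(0 == nvlist_add_boolean(snaps, "tank/fs@snap2"));
 *	err = dmu_snapshots_destroy_nvl(snaps, B_FALSE, failed);
 *	nvlist_free(snaps);
 */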
986
987 static boolean_t
988 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
989 {
990 boolean_t might_destroy = B_FALSE;
991
992 mutex_enter(&ds->ds_lock);
993 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
994 DS_IS_DEFER_DESTROY(ds))
995 might_destroy = B_TRUE;
996 mutex_exit(&ds->ds_lock);
997
998 return (might_destroy);
999 }
1000
1001 /*
1002 * If we're removing a clone, and these three conditions are true:
1003 * 1) the clone's origin has no other children
1004 * 2) the clone's origin has no user references
1005 * 3) the clone's origin has been marked for deferred destruction
1006 * Then, prepare to remove the origin as part of this sync task group.
1007 */
1008 static int
1009 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1010 {
1011 dsl_dataset_t *ds = dsda->ds;
1012 dsl_dataset_t *origin = ds->ds_prev;
1013
1014 if (dsl_dataset_might_destroy_origin(origin)) {
1015 char *name;
1016 int namelen;
1017 int error;
1018
1019 namelen = dsl_dataset_namelen(origin) + 1;
1020 name = kmem_alloc(namelen, KM_SLEEP);
1021 dsl_dataset_name(origin, name);
1022 #ifdef _KERNEL
1023 error = zfs_unmount_snap(name, NULL);
1024 if (error) {
1025 kmem_free(name, namelen);
1026 return (error);
1027 }
1028 #endif
1029 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1030 kmem_free(name, namelen);
1031 if (error)
1032 return (error);
1033 dsda->rm_origin = origin;
1034 dsl_dataset_make_exclusive(origin, tag);
1035 }
1036
1037 return (0);
1038 }
1039
1040 /*
1041 * ds must be opened as OWNER. On return (whether successful or not),
1042 * ds will be closed and caller can no longer dereference it.
1043 */
1044 int
1045 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1046 {
1047 int err;
1048 dsl_sync_task_group_t *dstg;
1049 objset_t *os;
1050 dsl_dir_t *dd;
1051 uint64_t obj;
1052 struct dsl_ds_destroyarg dsda = { 0 };
1053 dsl_dataset_t *dummy_ds;
1054
1055 dsda.ds = ds;
1056
1057 if (dsl_dataset_is_snapshot(ds)) {
1058 /* Destroying a snapshot is simpler */
1059 dsl_dataset_make_exclusive(ds, tag);
1060
1061 dsda.defer = defer;
1062 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1063 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1064 &dsda, tag, 0);
1065 ASSERT3P(dsda.rm_origin, ==, NULL);
1066 goto out;
1067 } else if (defer) {
1068 err = EINVAL;
1069 goto out;
1070 }
1071
1072 dd = ds->ds_dir;
1073 dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
1074 dummy_ds->ds_dir = dd;
1075 dummy_ds->ds_object = ds->ds_object;
1076
1077 /*
1078 * Check for errors and mark this ds as inconsistent, in
1079 * case we crash while freeing the objects.
1080 */
1081 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1082 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1083 if (err)
1084 goto out_free;
1085
1086 err = dmu_objset_from_ds(ds, &os);
1087 if (err)
1088 goto out_free;
1089
1090 /*
1091 * remove the objects in open context, so that we won't
1092 * have too much to do in syncing context.
1093 */
1094 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1095 ds->ds_phys->ds_prev_snap_txg)) {
1096 /*
1097 * Ignore errors, if there is not enough disk space
1098 * we will deal with it in dsl_dataset_destroy_sync().
1099 */
1100 (void) dmu_free_object(os, obj);
1101 }
1102 if (err != ESRCH)
1103 goto out_free;
1104
1105 /*
1106 * Only the ZIL knows how to free log blocks.
1107 */
1108 zil_destroy(dmu_objset_zil(os), B_FALSE);
1109
1110 /*
1111 * Sync out all in-flight IO.
1112 */
1113 txg_wait_synced(dd->dd_pool, 0);
1114
1115 /*
1116 * If we managed to free all the objects in open
1117 * context, the user space accounting should be zero.
1118 */
1119 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1120 dmu_objset_userused_enabled(os)) {
1121 ASSERTV(uint64_t count);
1122 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1123 count == 0);
1124 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1125 count == 0);
1126 }
1127
1128 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1129 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1130 rw_exit(&dd->dd_pool->dp_config_rwlock);
1131
1132 if (err)
1133 goto out_free;
1134
1135 /*
1136 * Blow away the dsl_dir + head dataset.
1137 */
1138 dsl_dataset_make_exclusive(ds, tag);
1139 /*
1140 * If we're removing a clone, we might also need to remove its
1141 * origin.
1142 */
1143 do {
1144 dsda.need_prep = B_FALSE;
1145 if (dsl_dir_is_clone(dd)) {
1146 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1147 if (err) {
1148 dsl_dir_close(dd, FTAG);
1149 goto out_free;
1150 }
1151 }
1152
1153 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1154 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1155 dsl_dataset_destroy_sync, &dsda, tag, 0);
1156 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1157 dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
1158 err = dsl_sync_task_group_wait(dstg);
1159 dsl_sync_task_group_destroy(dstg);
1160
1161 /*
1162 * We could be racing against 'zfs release' or 'zfs destroy -d'
1163 * on the origin snap, in which case we can get EBUSY if we
1164 * needed to destroy the origin snap but were not ready to
1165 * do so.
1166 */
1167 if (dsda.need_prep) {
1168 ASSERT(err == EBUSY);
1169 ASSERT(dsl_dir_is_clone(dd));
1170 ASSERT(dsda.rm_origin == NULL);
1171 }
1172 } while (dsda.need_prep);
1173
1174 if (dsda.rm_origin != NULL)
1175 dsl_dataset_disown(dsda.rm_origin, tag);
1176
1177 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1178 if (err)
1179 dsl_dir_close(dd, FTAG);
1180
1181 out_free:
1182 kmem_free(dummy_ds, sizeof (dsl_dataset_t));
1183 out:
1184 dsl_dataset_disown(ds, tag);
1185 return (err);
1186 }
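/*
 * Per the contract above, a minimal caller (illustrative): own the
 * dataset, then hand it to dsl_dataset_destroy(), which disowns it on
 * every path, success or failure:
 *
 *	dsl_dataset_t *ds;
 *	err = dsl_dataset_own("tank/doomed", B_FALSE, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG, B_FALSE);
 *	// ds must not be dereferenced here in either case
 */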
1187
1188 blkptr_t *
1189 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1190 {
1191 return (&ds->ds_phys->ds_bp);
1192 }
1193
1194 void
1195 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1196 {
1197 ASSERT(dmu_tx_is_syncing(tx));
1198 /* If it's the meta-objset, set dp_meta_rootbp */
1199 if (ds == NULL) {
1200 tx->tx_pool->dp_meta_rootbp = *bp;
1201 } else {
1202 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1203 ds->ds_phys->ds_bp = *bp;
1204 }
1205 }
1206
1207 spa_t *
1208 dsl_dataset_get_spa(dsl_dataset_t *ds)
1209 {
1210 return (ds->ds_dir->dd_pool->dp_spa);
1211 }
1212
1213 void
1214 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1215 {
1216 dsl_pool_t *dp;
1217
1218 if (ds == NULL) /* this is the meta-objset */
1219 return;
1220
1221 ASSERT(ds->ds_objset != NULL);
1222
1223 if (ds->ds_phys->ds_next_snap_obj != 0)
1224 panic("dirtying snapshot!");
1225
1226 dp = ds->ds_dir->dd_pool;
1227
1228 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1229 /* up the hold count until we can be written out */
1230 dmu_buf_add_ref(ds->ds_dbuf, ds);
1231 }
1232 }
1233
1234 boolean_t
1235 dsl_dataset_is_dirty(dsl_dataset_t *ds)
1236 {
1237 int t;
1238
1239 for (t = 0; t < TXG_SIZE; t++) {
1240 if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1241 ds, t))
1242 return (B_TRUE);
1243 }
1244 return (B_FALSE);
1245 }
1246
1247 /*
1248 * The unique space in the head dataset can be calculated by subtracting
1249 * the space used in the most recent snapshot, that is still being used
1250 * in this file system, from the space currently in use. To figure out
1251 * the space in the most recent snapshot still in use, we need to take
1252 * the total space used in the snapshot and subtract out the space that
1253 * has been freed up since the snapshot was taken.
1254 */
1255 static void
1256 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1257 {
1258 uint64_t mrs_used;
1259 uint64_t dlused, dlcomp, dluncomp;
1260
1261 ASSERT(!dsl_dataset_is_snapshot(ds));
1262
1263 if (ds->ds_phys->ds_prev_snap_obj != 0)
1264 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1265 else
1266 mrs_used = 0;
1267
1268 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1269
1270 ASSERT3U(dlused, <=, mrs_used);
1271 ds->ds_phys->ds_unique_bytes =
1272 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1273
1274 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1275 SPA_VERSION_UNIQUE_ACCURATE)
1276 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1277 }
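/*
 * Worked example (hypothetical numbers): if the head uses 1000 bytes
 * (ds_used_bytes), the most recent snapshot used 600 (mrs_used), and
 * 250 of those have since been freed into the head's deadlist
 * (dlused), then 600 - 250 = 350 snapshot bytes are still in use by
 * the head, and ds_unique_bytes = 1000 - 350 = 650.
 */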
1278
1279 struct killarg {
1280 dsl_dataset_t *ds;
1281 dmu_tx_t *tx;
1282 };
1283
1284 /* ARGSUSED */
1285 static int
1286 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1287 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1288 {
1289 struct killarg *ka = arg;
1290 dmu_tx_t *tx = ka->tx;
1291
1292 if (bp == NULL)
1293 return (0);
1294
1295 if (zb->zb_level == ZB_ZIL_LEVEL) {
1296 ASSERT(zilog != NULL);
1297 /*
1298 * It's a block in the intent log. It has no
1299 * accounting, so just free it.
1300 */
1301 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1302 } else {
1303 ASSERT(zilog == NULL);
1304 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1305 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1306 }
1307
1308 return (0);
1309 }
1310
1311 /* ARGSUSED */
1312 static int
1313 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1314 {
1315 dsl_dataset_t *ds = arg1;
1316 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1317 uint64_t count;
1318 int err;
1319
1320 /*
1321 * Can't delete a head dataset if there are snapshots of it.
1322 * (Except if the only snapshots are from the branch we cloned
1323 * from.)
1324 */
1325 if (ds->ds_prev != NULL &&
1326 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1327 return (EBUSY);
1328
1329 /*
1330 * This is really a dsl_dir thing, but check it here so that
1331 * we'll be less likely to leave this dataset inconsistent &
1332 * nearly destroyed.
1333 */
1334 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1335 if (err)
1336 return (err);
1337 if (count != 0)
1338 return (EEXIST);
1339
1340 return (0);
1341 }
1342
1343 /* ARGSUSED */
1344 static void
1345 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1346 {
1347 dsl_dataset_t *ds = arg1;
1348 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1349
1350 /* Mark it as inconsistent on-disk, in case we crash */
1351 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1352 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1353
1354 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1355 "dataset = %llu", ds->ds_object);
1356 }
1357
1358 static int
1359 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1360 dmu_tx_t *tx)
1361 {
1362 dsl_dataset_t *ds = dsda->ds;
1363 dsl_dataset_t *ds_prev = ds->ds_prev;
1364
1365 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1366 struct dsl_ds_destroyarg ndsda = {0};
1367
1368 /*
1369 * If we're not prepared to remove the origin, don't remove
1370 * the clone either.
1371 */
1372 if (dsda->rm_origin == NULL) {
1373 dsda->need_prep = B_TRUE;
1374 return (EBUSY);
1375 }
1376
1377 ndsda.ds = ds_prev;
1378 ndsda.is_origin_rm = B_TRUE;
1379 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1380 }
1381
1382 /*
1383 * If we're not going to remove the origin after all,
1384 * undo the open context setup.
1385 */
1386 if (dsda->rm_origin != NULL) {
1387 dsl_dataset_disown(dsda->rm_origin, tag);
1388 dsda->rm_origin = NULL;
1389 }
1390
1391 return (0);
1392 }
1393
1394 /*
1395 * If you add new checks here, you may need to add
1396 * additional checks to the "temporary" case in
1397 * snapshot_check() in dmu_objset.c.
1398 */
1399 /* ARGSUSED */
1400 int
1401 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1402 {
1403 struct dsl_ds_destroyarg *dsda = arg1;
1404 dsl_dataset_t *ds = dsda->ds;
1405
1406 /* we have an owner hold, so no one else can destroy us */
1407 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1408
1409 /*
1410 * Only allow deferred destroy on pools that support it.
1411 * NOTE: deferred destroy is only supported on snapshots.
1412 */
1413 if (dsda->defer) {
1414 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1415 SPA_VERSION_USERREFS)
1416 return (ENOTSUP);
1417 ASSERT(dsl_dataset_is_snapshot(ds));
1418 return (0);
1419 }
1420
1421 /*
1422 * Can't delete a head dataset if there are snapshots of it.
1423 * (Except if the only snapshots are from the branch we cloned
1424 * from.)
1425 */
1426 if (ds->ds_prev != NULL &&
1427 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1428 return (EBUSY);
1429
1430 /*
1431 * If we made changes this txg, traverse_dsl_dataset won't find
1432 * them. Try again.
1433 */
1434 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1435 return (EAGAIN);
1436
1437 if (dsl_dataset_is_snapshot(ds)) {
1438 /*
1439 * If this snapshot has an elevated user reference count,
1440 * we can't destroy it yet.
1441 */
1442 if (ds->ds_userrefs > 0 && !dsda->releasing)
1443 return (EBUSY);
1444
1445 mutex_enter(&ds->ds_lock);
1446 /*
1447 * Can't delete a branch point. However, if we're destroying
1448 * a clone and removing its origin due to it having a user
1449 * hold count of 0 and having been marked for deferred destroy,
1450 * it's OK for the origin to have a single clone.
1451 */
1452 if (ds->ds_phys->ds_num_children >
1453 (dsda->is_origin_rm ? 2 : 1)) {
1454 mutex_exit(&ds->ds_lock);
1455 return (EEXIST);
1456 }
1457 mutex_exit(&ds->ds_lock);
1458 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1459 return (dsl_dataset_origin_check(dsda, arg2, tx));
1460 }
1461
1462 /* XXX we should do some i/o error checking... */
1463 return (0);
1464 }
1465
1466 struct refsarg {
1467 kmutex_t lock;
1468 boolean_t gone;
1469 kcondvar_t cv;
1470 };
1471
1472 /* ARGSUSED */
1473 static void
1474 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1475 {
1476 struct refsarg *arg = argv;
1477
1478 mutex_enter(&arg->lock);
1479 arg->gone = TRUE;
1480 cv_signal(&arg->cv);
1481 mutex_exit(&arg->lock);
1482 }
1483
1484 static void
1485 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1486 {
1487 struct refsarg arg;
1488
1489 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1490 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1491 arg.gone = FALSE;
1492 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1493 dsl_dataset_refs_gone);
1494 dmu_buf_rele(ds->ds_dbuf, tag);
1495 mutex_enter(&arg.lock);
1496 while (!arg.gone)
1497 cv_wait(&arg.cv, &arg.lock);
1498 ASSERT(arg.gone);
1499 mutex_exit(&arg.lock);
1500 ds->ds_dbuf = NULL;
1501 ds->ds_phys = NULL;
1502 mutex_destroy(&arg.lock);
1503 cv_destroy(&arg.cv);
1504 }
1505
1506 static void
1507 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1508 {
1509 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1510 int err;
1511 ASSERTV(uint64_t count);
1512
1513 ASSERT(ds->ds_phys->ds_num_children >= 2);
1514 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1515 /*
1516 * The err should not be ENOENT, but a bug in a previous version
1517 * of the code could cause upgrade_clones_cb() to not set
1518 * ds_next_snap_obj when it should, leading to a missing entry.
1519 * If we knew that the pool was created after
1520 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1521 * ENOENT. However, at least we can check that we don't have
1522 * too many entries in the next_clones_obj even after failing to
1523 * remove this one.
1524 */
1525 if (err != ENOENT) {
1526 VERIFY3U(err, ==, 0);
1527 }
1528 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1529 &count));
1530 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1531 }
1532
1533 static void
1534 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1535 {
1536 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1537 zap_cursor_t zc;
1538 zap_attribute_t za;
1539
1540 /*
1541 * If it is the old version, dd_clones doesn't exist so we can't
1542 * find the clones, but deadlist_remove_key() is a no-op so it
1543 * doesn't matter.
1544 */
1545 if (ds->ds_dir->dd_phys->dd_clones == 0)
1546 return;
1547
1548 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1549 zap_cursor_retrieve(&zc, &za) == 0;
1550 zap_cursor_advance(&zc)) {
1551 dsl_dataset_t *clone;
1552
1553 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1554 za.za_first_integer, FTAG, &clone));
1555 if (clone->ds_dir->dd_origin_txg > mintxg) {
1556 dsl_deadlist_remove_key(&clone->ds_deadlist,
1557 mintxg, tx);
1558 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1559 }
1560 dsl_dataset_rele(clone, FTAG);
1561 }
1562 zap_cursor_fini(&zc);
1563 }
1564
1565 struct process_old_arg {
1566 dsl_dataset_t *ds;
1567 dsl_dataset_t *ds_prev;
1568 boolean_t after_branch_point;
1569 zio_t *pio;
1570 uint64_t used, comp, uncomp;
1571 };
1572
1573 static int
1574 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1575 {
1576 struct process_old_arg *poa = arg;
1577 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1578
1579 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1580 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1581 if (poa->ds_prev && !poa->after_branch_point &&
1582 bp->blk_birth >
1583 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1584 poa->ds_prev->ds_phys->ds_unique_bytes +=
1585 bp_get_dsize_sync(dp->dp_spa, bp);
1586 }
1587 } else {
1588 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1589 poa->comp += BP_GET_PSIZE(bp);
1590 poa->uncomp += BP_GET_UCSIZE(bp);
1591 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1592 }
1593 return (0);
1594 }
1595
1596 static void
1597 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1598 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1599 {
1600 struct process_old_arg poa = { 0 };
1601 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1602 objset_t *mos = dp->dp_meta_objset;
1603
1604 ASSERT(ds->ds_deadlist.dl_oldfmt);
1605 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1606
1607 poa.ds = ds;
1608 poa.ds_prev = ds_prev;
1609 poa.after_branch_point = after_branch_point;
1610 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1611 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1612 process_old_cb, &poa, tx));
1613 VERIFY3U(zio_wait(poa.pio), ==, 0);
1614 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1615
1616 /* change snapused */
1617 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1618 -poa.used, -poa.comp, -poa.uncomp, tx);
1619
1620 /* swap next's deadlist to our deadlist */
1621 dsl_deadlist_close(&ds->ds_deadlist);
1622 dsl_deadlist_close(&ds_next->ds_deadlist);
1623 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1624 ds->ds_phys->ds_deadlist_obj);
1625 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1626 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1627 ds_next->ds_phys->ds_deadlist_obj);
1628 }
1629
1630 void
1631 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1632 {
1633 struct dsl_ds_destroyarg *dsda = arg1;
1634 dsl_dataset_t *ds = dsda->ds;
1635 int err;
1636 int after_branch_point = FALSE;
1637 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1638 objset_t *mos = dp->dp_meta_objset;
1639 dsl_dataset_t *ds_prev = NULL;
1640 boolean_t wont_destroy;
1641 uint64_t obj;
1642
1643 wont_destroy = (dsda->defer &&
1644 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1645
1646 ASSERT(ds->ds_owner || wont_destroy);
1647 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1648 ASSERT(ds->ds_prev == NULL ||
1649 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1650 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1651
1652 if (wont_destroy) {
1653 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1654 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1655 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1656 return;
1657 }
1658
1659 /* signal any waiters that this dataset is going away */
1660 mutex_enter(&ds->ds_lock);
1661 ds->ds_owner = dsl_reaper;
1662 cv_broadcast(&ds->ds_exclusive_cv);
1663 mutex_exit(&ds->ds_lock);
1664
1665 /* Remove our reservation */
1666 if (ds->ds_reserved != 0) {
1667 dsl_prop_setarg_t psa;
1668 uint64_t value = 0;
1669
1670 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1671 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1672 &value);
1673 psa.psa_effective_value = 0; /* predict default value */
1674
1675 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1676 ASSERT3U(ds->ds_reserved, ==, 0);
1677 }
1678
1679 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1680
1681 dsl_scan_ds_destroyed(ds, tx);
1682
1683 obj = ds->ds_object;
1684
1685 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1686 if (ds->ds_prev) {
1687 ds_prev = ds->ds_prev;
1688 } else {
1689 VERIFY(0 == dsl_dataset_hold_obj(dp,
1690 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1691 }
1692 after_branch_point =
1693 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1694
1695 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1696 if (after_branch_point &&
1697 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1698 remove_from_next_clones(ds_prev, obj, tx);
1699 if (ds->ds_phys->ds_next_snap_obj != 0) {
1700 VERIFY(0 == zap_add_int(mos,
1701 ds_prev->ds_phys->ds_next_clones_obj,
1702 ds->ds_phys->ds_next_snap_obj, tx));
1703 }
1704 }
1705 if (after_branch_point &&
1706 ds->ds_phys->ds_next_snap_obj == 0) {
1707 /* This clone is toast. */
1708 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1709 ds_prev->ds_phys->ds_num_children--;
1710
1711 /*
1712 * If the clone's origin has no other clones, no
1713 * user holds, and has been marked for deferred
1714 * deletion, then we should have done the necessary
1715 * destroy setup for it.
1716 */
1717 if (ds_prev->ds_phys->ds_num_children == 1 &&
1718 ds_prev->ds_userrefs == 0 &&
1719 DS_IS_DEFER_DESTROY(ds_prev)) {
1720 ASSERT3P(dsda->rm_origin, !=, NULL);
1721 } else {
1722 ASSERT3P(dsda->rm_origin, ==, NULL);
1723 }
1724 } else if (!after_branch_point) {
1725 ds_prev->ds_phys->ds_next_snap_obj =
1726 ds->ds_phys->ds_next_snap_obj;
1727 }
1728 }
1729
1730 if (dsl_dataset_is_snapshot(ds)) {
1731 dsl_dataset_t *ds_next;
1732 uint64_t old_unique;
1733 uint64_t used = 0, comp = 0, uncomp = 0;
1734
1735 VERIFY(0 == dsl_dataset_hold_obj(dp,
1736 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1737 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1738
1739 old_unique = ds_next->ds_phys->ds_unique_bytes;
1740
1741 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1742 ds_next->ds_phys->ds_prev_snap_obj =
1743 ds->ds_phys->ds_prev_snap_obj;
1744 ds_next->ds_phys->ds_prev_snap_txg =
1745 ds->ds_phys->ds_prev_snap_txg;
1746 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1747 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1748
1749
1750 if (ds_next->ds_deadlist.dl_oldfmt) {
1751 process_old_deadlist(ds, ds_prev, ds_next,
1752 after_branch_point, tx);
1753 } else {
1754 /* Adjust prev's unique space. */
1755 if (ds_prev && !after_branch_point) {
1756 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1757 ds_prev->ds_phys->ds_prev_snap_txg,
1758 ds->ds_phys->ds_prev_snap_txg,
1759 &used, &comp, &uncomp);
1760 ds_prev->ds_phys->ds_unique_bytes += used;
1761 }
1762
1763 /* Adjust snapused. */
1764 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1765 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1766 &used, &comp, &uncomp);
1767 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1768 -used, -comp, -uncomp, tx);
1769
1770 /* Move blocks to be freed to pool's free list. */
1771 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1772 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1773 tx);
1774 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1775 DD_USED_HEAD, used, comp, uncomp, tx);
1776 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1777
1778 /* Merge our deadlist into next's and free it. */
1779 dsl_deadlist_merge(&ds_next->ds_deadlist,
1780 ds->ds_phys->ds_deadlist_obj, tx);
1781 }
1782 dsl_deadlist_close(&ds->ds_deadlist);
1783 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1784
1785 /* Collapse range in clone heads */
1786 dsl_dataset_remove_clones_key(ds,
1787 ds->ds_phys->ds_creation_txg, tx);
1788
1789 if (dsl_dataset_is_snapshot(ds_next)) {
1790 dsl_dataset_t *ds_nextnext;
1791 dsl_dataset_t *hds;
1792
1793 /*
1794 * Update next's unique to include blocks which
1795 * were previously shared by only this snapshot
1796 * and it. Those blocks will be born after the
1797 * prev snap and before this snap, and will have
1798 * died after the next snap and before the one
1799 * after that (ie. be on the snap after next's
1800 * deadlist).
1801 */
1802 VERIFY(0 == dsl_dataset_hold_obj(dp,
1803 ds_next->ds_phys->ds_next_snap_obj,
1804 FTAG, &ds_nextnext));
1805 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1806 ds->ds_phys->ds_prev_snap_txg,
1807 ds->ds_phys->ds_creation_txg,
1808 &used, &comp, &uncomp);
1809 ds_next->ds_phys->ds_unique_bytes += used;
1810 dsl_dataset_rele(ds_nextnext, FTAG);
1811 ASSERT3P(ds_next->ds_prev, ==, NULL);
1812
1813 /* Collapse range in this head. */
1814 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1815 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1816 FTAG, &hds));
1817 dsl_deadlist_remove_key(&hds->ds_deadlist,
1818 ds->ds_phys->ds_creation_txg, tx);
1819 dsl_dataset_rele(hds, FTAG);
1820
1821 } else {
1822 ASSERT3P(ds_next->ds_prev, ==, ds);
1823 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1824 ds_next->ds_prev = NULL;
1825 if (ds_prev) {
1826 VERIFY(0 == dsl_dataset_get_ref(dp,
1827 ds->ds_phys->ds_prev_snap_obj,
1828 ds_next, &ds_next->ds_prev));
1829 }
1830
1831 dsl_dataset_recalc_head_uniq(ds_next);
1832
1833 /*
1834 * Reduce the amount of our unconsumed refreservation
1835 * being charged to our parent by the amount of
1836 * new unique data we have gained.
1837 */
1838 if (old_unique < ds_next->ds_reserved) {
1839 int64_t mrsdelta;
1840 uint64_t new_unique =
1841 ds_next->ds_phys->ds_unique_bytes;
1842
1843 ASSERT(old_unique <= new_unique);
1844 mrsdelta = MIN(new_unique - old_unique,
1845 ds_next->ds_reserved - old_unique);
1846 dsl_dir_diduse_space(ds->ds_dir,
1847 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1848 }
1849 }
1850 dsl_dataset_rele(ds_next, FTAG);
1851 } else {
1852 /*
1853 * There's no next snapshot, so this is a head dataset.
1854 * Destroy the deadlist. Unless it's a clone, the
1855 * deadlist should be empty. (If it's a clone, it's
1856 * safe to ignore the deadlist contents.)
1857 */
1858 struct killarg ka;
1859
1860 dsl_deadlist_close(&ds->ds_deadlist);
1861 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1862 ds->ds_phys->ds_deadlist_obj = 0;
1863
1864 /*
1865 * Free everything that we point to (that's born after
1866 * the previous snapshot, if we are a clone)
1867 *
1868 * NB: this should be very quick, because we already
1869 * freed all the objects in open context.
1870 */
1871 ka.ds = ds;
1872 ka.tx = tx;
1873 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1874 TRAVERSE_POST, kill_blkptr, &ka);
1875 ASSERT3U(err, ==, 0);
1876 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1877 ds->ds_phys->ds_unique_bytes == 0);
1878
1879 if (ds->ds_prev != NULL) {
1880 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1881 VERIFY3U(0, ==, zap_remove_int(mos,
1882 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1883 ds->ds_object, tx));
1884 }
1885 dsl_dataset_rele(ds->ds_prev, ds);
1886 ds->ds_prev = ds_prev = NULL;
1887 }
1888 }
1889
1890 /*
1891 * This must be done after the traverse_dataset(), because it will
1892 * re-open the objset.
1893 */
1894 if (ds->ds_objset) {
1895 dmu_objset_evict(ds->ds_objset);
1896 ds->ds_objset = NULL;
1897 }
1898
1899 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1900 /* Erase the link in the dir */
1901 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1902 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1903 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1904 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1905 ASSERT(err == 0);
1906 } else {
1907 /* remove from snapshot namespace */
1908 dsl_dataset_t *ds_head;
1909 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1910 VERIFY(0 == dsl_dataset_hold_obj(dp,
1911 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1912 VERIFY(0 == dsl_dataset_get_snapname(ds));
1913 #ifdef ZFS_DEBUG
1914 {
1915 uint64_t val;
1916
1917 err = dsl_dataset_snap_lookup(ds_head,
1918 ds->ds_snapname, &val);
1919 ASSERT3U(err, ==, 0);
1920 ASSERT3U(val, ==, obj);
1921 }
1922 #endif
1923 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1924 ASSERT(err == 0);
1925 dsl_dataset_rele(ds_head, FTAG);
1926 }
1927
1928 if (ds_prev && ds->ds_prev != ds_prev)
1929 dsl_dataset_rele(ds_prev, FTAG);
1930
1931 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1932 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1933 "dataset = %llu", ds->ds_object);
1934
1935 if (ds->ds_phys->ds_next_clones_obj != 0) {
1936 ASSERTV(uint64_t count);
1937 ASSERT(0 == zap_count(mos,
1938 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1939 VERIFY(0 == dmu_object_free(mos,
1940 ds->ds_phys->ds_next_clones_obj, tx));
1941 }
1942 if (ds->ds_phys->ds_props_obj != 0)
1943 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1944 if (ds->ds_phys->ds_userrefs_obj != 0)
1945 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1946 dsl_dir_close(ds->ds_dir, ds);
1947 ds->ds_dir = NULL;
1948 dsl_dataset_drain_refs(ds, tag);
1949 VERIFY(0 == dmu_object_free(mos, obj, tx));
1950
1951 if (dsda->rm_origin) {
1952 /*
1953 * Remove the origin of the clone we just destroyed.
1954 */
1955 struct dsl_ds_destroyarg ndsda = {0};
1956
1957 ndsda.ds = dsda->rm_origin;
1958 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1959 }
1960 }
1961
1962 static int
1963 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1964 {
1965 uint64_t asize;
1966
1967 if (!dmu_tx_is_syncing(tx))
1968 return (0);
1969
1970 /*
1971 * If there's an fs-only reservation, any blocks that might become
1972 * owned by the snapshot dataset must be accommodated by space
1973 * outside of the reservation.
1974 */
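/*
 * Illustrative case (invented sizes): with unique_bytes = 3M
 * and a 5M refreservation, asize = MIN(3M, 5M) = 3M.  Those 3M
 * move to the snapshot at sync time while the head must still
 * back its full reservation, so 3M of extra space is required.
 */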
1975 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1976 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1977 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1978 return (ENOSPC);
1979
1980 /*
1981 * Propagate any reserved space for this snapshot to other
1982 * snapshot checks in this sync group.
1983 */
1984 if (asize > 0)
1985 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1986
1987 return (0);
1988 }
1989
1990 int
1991 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1992 {
1993 dsl_dataset_t *ds = arg1;
1994 const char *snapname = arg2;
1995 int err;
1996 uint64_t value;
1997
1998 /*
1999 * We don't allow multiple snapshots of the same txg. If there
2000 * is already one, try again.
2001 */
2002 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2003 return (EAGAIN);
2004
2005 /*
2006 * Check for a conflicting snapshot name.
2007 */
2008 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2009 if (err == 0)
2010 return (EEXIST);
2011 if (err != ENOENT)
2012 return (err);
2013
2014 /*
2015 * Check that the snapshot's full name is not too long. It consists
2016 * of the dataset name's length + 1 for the @-sign + the snapshot name's length.
2017 */
2018 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2019 return (ENAMETOOLONG);
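/*
 * E.g., for dataset "pool/fs" and snapshot "monday", the full
 * name "pool/fs@monday" is 7 + 1 + 6 = 14 characters, which
 * must stay below MAXNAMELEN.
 */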
2020
2021 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2022 if (err)
2023 return (err);
2024
2025 ds->ds_trysnap_txg = tx->tx_txg;
2026 return (0);
2027 }
2028
2029 void
2030 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2031 {
2032 dsl_dataset_t *ds = arg1;
2033 const char *snapname = arg2;
2034 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2035 dmu_buf_t *dbuf;
2036 dsl_dataset_phys_t *dsphys;
2037 uint64_t dsobj, crtxg;
2038 objset_t *mos = dp->dp_meta_objset;
2039 int err;
2040
2041 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2042
2043 /*
2044 * The origin's ds_creation_txg has to be < TXG_INITIAL
2045 */
2046 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2047 crtxg = 1;
2048 else
2049 crtxg = tx->tx_txg;
2050
2051 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2052 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2053 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2054 dmu_buf_will_dirty(dbuf, tx);
2055 dsphys = dbuf->db_data;
2056 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2057 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2058 dsphys->ds_fsid_guid = unique_create();
2059 (void) random_get_pseudo_bytes((void *)&dsphys->ds_guid,
2060 sizeof (dsphys->ds_guid));
2061 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2062 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2063 dsphys->ds_next_snap_obj = ds->ds_object;
2064 dsphys->ds_num_children = 1;
2065 dsphys->ds_creation_time = gethrestime_sec();
2066 dsphys->ds_creation_txg = crtxg;
2067 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2068 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2069 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2070 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2071 dsphys->ds_flags = ds->ds_phys->ds_flags;
2072 dsphys->ds_bp = ds->ds_phys->ds_bp;
2073 dmu_buf_rele(dbuf, FTAG);
2074
2075 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2076 if (ds->ds_prev) {
2077 uint64_t next_clones_obj =
2078 ds->ds_prev->ds_phys->ds_next_clones_obj;
2079 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2080 ds->ds_object ||
2081 ds->ds_prev->ds_phys->ds_num_children > 1);
2082 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2083 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2084 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2085 ds->ds_prev->ds_phys->ds_creation_txg);
2086 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2087 } else if (next_clones_obj != 0) {
2088 remove_from_next_clones(ds->ds_prev,
2089 dsphys->ds_next_snap_obj, tx);
2090 VERIFY3U(0, ==, zap_add_int(mos,
2091 next_clones_obj, dsobj, tx));
2092 }
2093 }
2094
2095 /*
2096 * If we have a reference-reservation on this dataset, we will
2097 * need to increase the amount of refreservation being charged
2098 * since our unique space is going to zero.
2099 */
2100 if (ds->ds_reserved) {
2101 int64_t delta;
2102 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2103 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2104 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2105 delta, 0, 0, tx);
2106 }
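/*
 * Hypothetical numbers: unique_bytes = 4M, refreservation = 10M.
 * Before the snapshot only 10M - 4M = 6M was charged as
 * unconsumed refreservation; afterwards unique_bytes is 0 and
 * the full 10M is unconsumed, so delta = MIN(4M, 10M) = 4M.
 */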
2107
2108 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2109 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2110 ds->ds_dir->dd_myname, snapname, dsobj,
2111 ds->ds_phys->ds_prev_snap_txg);
2112 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2113 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2114 dsl_deadlist_close(&ds->ds_deadlist);
2115 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2116 dsl_deadlist_add_key(&ds->ds_deadlist,
2117 ds->ds_phys->ds_prev_snap_txg, tx);
2118
2119 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2120 ds->ds_phys->ds_prev_snap_obj = dsobj;
2121 ds->ds_phys->ds_prev_snap_txg = crtxg;
2122 ds->ds_phys->ds_unique_bytes = 0;
2123 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2124 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2125
2126 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2127 snapname, 8, 1, &dsobj, tx);
2128 ASSERT(err == 0);
2129
2130 if (ds->ds_prev)
2131 dsl_dataset_drop_ref(ds->ds_prev, ds);
2132 VERIFY(0 == dsl_dataset_get_ref(dp,
2133 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2134
2135 dsl_scan_ds_snapshotted(ds, tx);
2136
2137 dsl_dir_snap_cmtime_update(ds->ds_dir);
2138
2139 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2140 "dataset = %llu", dsobj);
2141 }
2142
2143 void
2144 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2145 {
2146 ASSERT(dmu_tx_is_syncing(tx));
2147 ASSERT(ds->ds_objset != NULL);
2148 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2149
2150 /*
2151 * In case we had to change ds_fsid_guid when we opened it,
2152 * sync it out now.
2153 */
2154 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2155 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2156
2157 dsl_dir_dirty(ds->ds_dir, tx);
2158 dmu_objset_sync(ds->ds_objset, zio, tx);
2159 }
2160
2161 static void
2162 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2163 {
2164 uint64_t count = 0;
2165 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2166 zap_cursor_t zc;
2167 zap_attribute_t za;
2168 nvlist_t *propval;
2169 nvlist_t *val;
2170
2171 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2172 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2173 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2174
2175 /*
2176 * There may be missing entries in ds_next_clones_obj
2177 * due to a bug in a previous version of the code.
2178 * Only trust it if it has the right number of entries.
2179 */
2180 if (ds->ds_phys->ds_next_clones_obj != 0) {
2181 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2182 &count));
2183 }
2184 if (count != ds->ds_phys->ds_num_children - 1) {
2185 goto fail;
2186 }
2187 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2188 zap_cursor_retrieve(&zc, &za) == 0;
2189 zap_cursor_advance(&zc)) {
2190 dsl_dataset_t *clone;
2191 char buf[ZFS_MAXNAMELEN];
2192 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2193 za.za_first_integer, FTAG, &clone) != 0) {
2194 goto fail;
2195 }
2196 dsl_dir_name(clone->ds_dir, buf);
2197 VERIFY(nvlist_add_boolean(val, buf) == 0);
2198 dsl_dataset_rele(clone, FTAG);
2199 }
2200 zap_cursor_fini(&zc);
2201 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2202 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2203 propval) == 0);
2204 fail:
2205 nvlist_free(val);
2206 nvlist_free(propval);
2207 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2208 }
2209
2210 void
2211 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2212 {
2213 uint64_t refd, avail, uobjs, aobjs, ratio;
2214
2215 dsl_dir_stats(ds->ds_dir, nv);
2216
2217 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2218 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2219 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2220
2221 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2222 ds->ds_phys->ds_creation_time);
2223 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2224 ds->ds_phys->ds_creation_txg);
2225 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2226 ds->ds_quota);
2227 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2228 ds->ds_reserved);
2229 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2230 ds->ds_phys->ds_guid);
2231 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2232 ds->ds_phys->ds_unique_bytes);
2233 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2234 ds->ds_object);
2235 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2236 ds->ds_userrefs);
2237 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2238 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2239
2240 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2241 uint64_t written, comp, uncomp;
2242 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2243 dsl_dataset_t *prev;
2244 int err;
2245
2246 rw_enter(&dp->dp_config_rwlock, RW_READER);
2247 err = dsl_dataset_hold_obj(dp,
2248 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2249 rw_exit(&dp->dp_config_rwlock);
2250 if (err == 0) {
2251 err = dsl_dataset_space_written(prev, ds, &written,
2252 &comp, &uncomp);
2253 dsl_dataset_rele(prev, FTAG);
2254 if (err == 0) {
2255 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2256 written);
2257 }
2258 }
2259 }
2260
2261 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2262 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2263 ds->ds_phys->ds_compressed_bytes);
2264 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
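/*
 * The ratio is in hundredths; e.g. (invented sizes), 300M
 * uncompressed over 120M compressed yields ratio = 250,
 * which userland reports as "2.50x".
 */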
2265
2266 if (ds->ds_phys->ds_next_snap_obj) {
2267 /*
2268 * This is a snapshot; override the dd's space used with
2269 * our unique space and compression ratio.
2270 */
2271 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2272 ds->ds_phys->ds_unique_bytes);
2273 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2274
2275 get_clones_stat(ds, nv);
2276 }
2277 }
2278
2279 void
2280 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2281 {
2282 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2283 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2284 stat->dds_guid = ds->ds_phys->ds_guid;
2285 if (ds->ds_phys->ds_next_snap_obj) {
2286 stat->dds_is_snapshot = B_TRUE;
2287 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2288 } else {
2289 stat->dds_is_snapshot = B_FALSE;
2290 stat->dds_num_clones = 0;
2291 }
2292
2293 /* clone origin is really a dsl_dir thing... */
2294 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2295 if (dsl_dir_is_clone(ds->ds_dir)) {
2296 dsl_dataset_t *ods;
2297
2298 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2299 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2300 dsl_dataset_name(ods, stat->dds_origin);
2301 dsl_dataset_drop_ref(ods, FTAG);
2302 } else {
2303 stat->dds_origin[0] = '\0';
2304 }
2305 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2306 }
2307
2308 uint64_t
2309 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2310 {
2311 return (ds->ds_fsid_guid);
2312 }
2313
2314 void
2315 dsl_dataset_space(dsl_dataset_t *ds,
2316 uint64_t *refdbytesp, uint64_t *availbytesp,
2317 uint64_t *usedobjsp, uint64_t *availobjsp)
2318 {
2319 *refdbytesp = ds->ds_phys->ds_used_bytes;
2320 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2321 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2322 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2323 if (ds->ds_quota != 0) {
2324 /*
2325 * Adjust available bytes according to refquota
2326 */
2327 if (*refdbytesp < ds->ds_quota)
2328 *availbytesp = MIN(*availbytesp,
2329 ds->ds_quota - *refdbytesp);
2330 else
2331 *availbytesp = 0;
2332 }
2333 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2334 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2335 }
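/*
 * Worked example for the above (hypothetical figures):
 * refd = 2G, dir space available = 10G, refreservation = 5G,
 * unique_bytes = 2G.  The unconsumed 3G of the reservation is
 * added back, giving avail = 13G; a 4G refquota then clamps it
 * to MIN(13G, 4G - 2G) = 2G.
 */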
2336
2337 boolean_t
2338 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2339 {
2340 ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
2341
2342 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2343 dsl_pool_sync_context(dp));
2344 if (ds->ds_prev == NULL)
2345 return (B_FALSE);
2346 if (ds->ds_phys->ds_bp.blk_birth >
2347 ds->ds_prev->ds_phys->ds_creation_txg) {
2348 objset_t *os, *os_prev;
2349 /*
2350 * It may be that only the ZIL differs, because it was
2351 * reset in the head. Don't count that as being
2352 * modified.
2353 */
2354 if (dmu_objset_from_ds(ds, &os) != 0)
2355 return (B_TRUE);
2356 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2357 return (B_TRUE);
2358 return (bcmp(&os->os_phys->os_meta_dnode,
2359 &os_prev->os_phys->os_meta_dnode,
2360 sizeof (os->os_phys->os_meta_dnode)) != 0);
2361 }
2362 return (B_FALSE);
2363 }
2364
2365 /* ARGSUSED */
2366 static int
2367 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2368 {
2369 dsl_dataset_t *ds = arg1;
2370 char *newsnapname = arg2;
2371 dsl_dir_t *dd = ds->ds_dir;
2372 dsl_dataset_t *hds;
2373 uint64_t val;
2374 int err;
2375
2376 err = dsl_dataset_hold_obj(dd->dd_pool,
2377 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2378 if (err)
2379 return (err);
2380
2381 /* new name better not be in use */
2382 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2383 dsl_dataset_rele(hds, FTAG);
2384
2385 if (err == 0)
2386 err = EEXIST;
2387 else if (err == ENOENT)
2388 err = 0;
2389
2390 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2391 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2392 err = ENAMETOOLONG;
2393
2394 return (err);
2395 }
2396
2397 static void
2398 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2399 {
2400 dsl_dataset_t *ds = arg1;
2401 const char *newsnapname = arg2;
2402 dsl_dir_t *dd = ds->ds_dir;
2403 objset_t *mos = dd->dd_pool->dp_meta_objset;
2404 dsl_dataset_t *hds;
2405 int err;
2406
2407 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2408
2409 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2410 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2411
2412 VERIFY(0 == dsl_dataset_get_snapname(ds));
2413 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2414 ASSERT3U(err, ==, 0);
2415 mutex_enter(&ds->ds_lock);
2416 (void) strcpy(ds->ds_snapname, newsnapname);
2417 mutex_exit(&ds->ds_lock);
2418 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2419 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2420 ASSERT3U(err, ==, 0);
2421
2422 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2423 "dataset = %llu", ds->ds_object);
2424 dsl_dataset_rele(hds, FTAG);
2425 }
2426
2427 struct renamesnaparg {
2428 dsl_sync_task_group_t *dstg;
2429 char failed[MAXPATHLEN];
2430 char *oldsnap;
2431 char *newsnap;
2432 };
2433
2434 static int
2435 dsl_snapshot_rename_one(const char *name, void *arg)
2436 {
2437 struct renamesnaparg *ra = arg;
2438 dsl_dataset_t *ds = NULL;
2439 char *snapname;
2440 int err;
2441
2442 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2443 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2444
2445 /*
2446 * For recursive snapshot renames the parent won't be changing,
2447 * so we just pass name for both the to/from arguments.
2448 */
2449 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2450 if (err != 0) {
2451 strfree(snapname);
2452 return (err == ENOENT ? 0 : err);
2453 }
2454
2455 #ifdef _KERNEL
2456 /*
2457 * For each filesystem undergoing rename, we must first unmount its snapshot.
2458 */
2459 (void) zfs_unmount_snap(snapname, NULL);
2460 #endif
2461 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2462 strfree(snapname);
2463 if (err != 0)
2464 return (err == ENOENT ? 0 : err);
2465
2466 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2467 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2468
2469 return (0);
2470 }
2471
2472 static int
2473 dsl_recursive_rename(char *oldname, const char *newname)
2474 {
2475 int err;
2476 struct renamesnaparg *ra;
2477 dsl_sync_task_t *dst;
2478 spa_t *spa;
2479 char *cp, *fsname = spa_strdup(oldname);
2480 int len = strlen(oldname) + 1;
2481
2482 /* truncate the snapshot name to get the fsname */
2483 cp = strchr(fsname, '@');
2484 *cp = '\0';
2485
2486 err = spa_open(fsname, &spa, FTAG);
2487 if (err) {
2488 kmem_free(fsname, len);
2489 return (err);
2490 }
2491 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2492 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2493
2494 ra->oldsnap = strchr(oldname, '@') + 1;
2495 ra->newsnap = strchr(newname, '@') + 1;
2496 *ra->failed = '\0';
2497
2498 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2499 DS_FIND_CHILDREN);
2500 kmem_free(fsname, len);
2501
2502 if (err == 0) {
2503 err = dsl_sync_task_group_wait(ra->dstg);
2504 }
2505
2506 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2507 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2508 dsl_dataset_t *ds = dst->dst_arg1;
2509 if (dst->dst_err) {
2510 dsl_dir_name(ds->ds_dir, ra->failed);
2511 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2512 (void) strlcat(ra->failed, ra->newsnap,
2513 sizeof (ra->failed));
2514 }
2515 dsl_dataset_rele(ds, ra->dstg);
2516 }
2517
2518 if (err)
2519 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2520
2521 dsl_sync_task_group_destroy(ra->dstg);
2522 kmem_free(ra, sizeof (struct renamesnaparg));
2523 spa_close(spa, FTAG);
2524 return (err);
2525 }
2526
2527 static int
2528 dsl_valid_rename(const char *oldname, void *arg)
2529 {
2530 int delta = *(int *)arg;
2531
2532 if (strlen(oldname) + delta >= MAXNAMELEN)
2533 return (ENAMETOOLONG);
2534
2535 return (0);
2536 }
2537
2538 #pragma weak dmu_objset_rename = dsl_dataset_rename
2539 int
2540 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2541 {
2542 dsl_dir_t *dd;
2543 dsl_dataset_t *ds;
2544 const char *tail;
2545 int err;
2546
2547 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2548 if (err)
2549 return (err);
2550
2551 if (tail == NULL) {
2552 int delta = strlen(newname) - strlen(oldname);
2553
2554 /* if we're growing, validate child name lengths */
2555 if (delta > 0)
2556 err = dmu_objset_find(oldname, dsl_valid_rename,
2557 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2558
2559 if (err == 0)
2560 err = dsl_dir_rename(dd, newname);
2561 dsl_dir_close(dd, FTAG);
2562 return (err);
2563 }
2564
2565 if (tail[0] != '@') {
2566 /* the name ended in a nonexistent component */
2567 dsl_dir_close(dd, FTAG);
2568 return (ENOENT);
2569 }
2570
2571 dsl_dir_close(dd, FTAG);
2572
2573 /* new name must be a snapshot in the same filesystem */
2574 tail = strchr(newname, '@');
2575 if (tail == NULL)
2576 return (EINVAL);
2577 tail++;
2578 if (strncmp(oldname, newname, tail - newname) != 0)
2579 return (EXDEV);
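/*
 * E.g., "pool/fs@old" -> "pool/fs@new" passes (shared
 * "pool/fs@" prefix), while "pool/fs@old" -> "pool/other@new"
 * fails with EXDEV: a rename cannot move a snapshot to a
 * different filesystem.
 */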
2580
2581 if (recursive) {
2582 err = dsl_recursive_rename(oldname, newname);
2583 } else {
2584 err = dsl_dataset_hold(oldname, FTAG, &ds);
2585 if (err)
2586 return (err);
2587
2588 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2589 dsl_dataset_snapshot_rename_check,
2590 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2591
2592 dsl_dataset_rele(ds, FTAG);
2593 }
2594
2595 return (err);
2596 }
2597
2598 struct promotenode {
2599 list_node_t link;
2600 dsl_dataset_t *ds;
2601 };
2602
2603 struct promotearg {
2604 list_t shared_snaps, origin_snaps, clone_snaps;
2605 dsl_dataset_t *origin_origin;
2606 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2607 char *err_ds;
2608 };
2609
2610 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2611
2612 static int
2613 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2614 {
2615 dsl_dataset_t *hds = arg1;
2616 struct promotearg *pa = arg2;
2617 struct promotenode *snap = list_head(&pa->shared_snaps);
2618 dsl_dataset_t *origin_ds = snap->ds;
2619 int err;
2620 uint64_t unused;
2621
2622 /* Check that it is a real clone */
2623 if (!dsl_dir_is_clone(hds->ds_dir))
2624 return (EINVAL);
2625
2626 /* Since this is so expensive, don't do the preliminary check */
2627 if (!dmu_tx_is_syncing(tx))
2628 return (0);
2629
2630 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2631 return (EXDEV);
2632
2633 /* compute origin's new unique space */
2634 snap = list_tail(&pa->clone_snaps);
2635 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2636 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2637 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2638 &pa->unique, &unused, &unused);
2639
2640 /*
2641 * Walk the snapshots that we are moving
2642 *
2643 * Compute space to transfer. Consider the incremental changes
2644 * to used for each snapshot:
2645 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2646 * So each snapshot gave birth to:
2647 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2648 * So a sequence would look like:
2649 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2650 * Which simplifies to:
2651 * uN + kN + k(N-1) + ... + k1 + k0
2652 * Note however, if we stop before we reach the ORIGIN we get:
2653 * uN + kN + k(N-1) + ... + kM - u(M-1)
2654 */
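/*
 * Concrete instance (made-up sizes) for two snapshots:
 * u0 = 1000M, k0 = 0; u1 = 1200M, k1 = 100M.  Blocks born by
 * snap1 = u1 - u0 + k1 = 300M, by snap0 = 1000M, and the
 * telescoped total u1 + k1 + k0 = 1300M is what the loop below
 * accumulates: the origin's used plus each deadlist.
 */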
2655 pa->used = origin_ds->ds_phys->ds_used_bytes;
2656 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2657 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2658 for (snap = list_head(&pa->shared_snaps); snap;
2659 snap = list_next(&pa->shared_snaps, snap)) {
2660 uint64_t val, dlused, dlcomp, dluncomp;
2661 dsl_dataset_t *ds = snap->ds;
2662
2663 /* Check that the snapshot name does not conflict */
2664 VERIFY(0 == dsl_dataset_get_snapname(ds));
2665 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2666 if (err == 0) {
2667 err = EEXIST;
2668 goto out;
2669 }
2670 if (err != ENOENT)
2671 goto out;
2672
2673 /* The very first snapshot does not have a deadlist */
2674 if (ds->ds_phys->ds_prev_snap_obj == 0)
2675 continue;
2676
2677 dsl_deadlist_space(&ds->ds_deadlist,
2678 &dlused, &dlcomp, &dluncomp);
2679 pa->used += dlused;
2680 pa->comp += dlcomp;
2681 pa->uncomp += dluncomp;
2682 }
2683
2684 /*
2685 * If we are a clone of a clone then we never reached ORIGIN,
2686 * so we need to subtract out the clone origin's used space.
2687 */
2688 if (pa->origin_origin) {
2689 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2690 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2691 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2692 }
2693
2694 /* Check that there is enough space here */
2695 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2696 pa->used);
2697 if (err)
2698 return (err);
2699
2700 /*
2701 * Compute the amounts of space that will be used by snapshots
2702 * after the promotion (for both origin and clone). For each,
2703 * it is the amount of space that will be on all of their
2704 * deadlists (that was not born before their new origin).
2705 */
2706 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2707 uint64_t space;
2708
2709 /*
2710 * Note, typically this will not be a clone of a clone,
2711 * so dd_origin_txg will be < TXG_INITIAL, so
2712 * these snaplist_space() -> dsl_deadlist_space_range()
2713 * calls will be fast because they do not have to
2714 * iterate over all bps.
2715 */
2716 snap = list_head(&pa->origin_snaps);
2717 err = snaplist_space(&pa->shared_snaps,
2718 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2719 if (err)
2720 return (err);
2721
2722 err = snaplist_space(&pa->clone_snaps,
2723 snap->ds->ds_dir->dd_origin_txg, &space);
2724 if (err)
2725 return (err);
2726 pa->cloneusedsnap += space;
2727 }
2728 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2729 err = snaplist_space(&pa->origin_snaps,
2730 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2731 if (err)
2732 return (err);
2733 }
2734
2735 return (0);
2736 out:
2737 pa->err_ds = snap->ds->ds_snapname;
2738 return (err);
2739 }
2740
2741 static void
2742 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2743 {
2744 dsl_dataset_t *hds = arg1;
2745 struct promotearg *pa = arg2;
2746 struct promotenode *snap = list_head(&pa->shared_snaps);
2747 dsl_dataset_t *origin_ds = snap->ds;
2748 dsl_dataset_t *origin_head;
2749 dsl_dir_t *dd = hds->ds_dir;
2750 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2751 dsl_dir_t *odd = NULL;
2752 uint64_t oldnext_obj;
2753 int64_t delta;
2754
2755 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2756
2757 snap = list_head(&pa->origin_snaps);
2758 origin_head = snap->ds;
2759
2760 /*
2761 * We need to explicitly open odd, since origin_ds's dd will be
2762 * changing.
2763 */
2764 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2765 NULL, FTAG, &odd));
2766
2767 /* change origin's next snap */
2768 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2769 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2770 snap = list_tail(&pa->clone_snaps);
2771 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2772 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2773
2774 /* change the origin's next clone */
2775 if (origin_ds->ds_phys->ds_next_clones_obj) {
2776 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2777 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2778 origin_ds->ds_phys->ds_next_clones_obj,
2779 oldnext_obj, tx));
2780 }
2781
2782 /* change origin */
2783 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2784 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2785 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2786 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2787 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2788 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2789 origin_head->ds_dir->dd_origin_txg =
2790 origin_ds->ds_phys->ds_creation_txg;
2791
2792 /* change dd_clone entries */
2793 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2794 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2795 odd->dd_phys->dd_clones, hds->ds_object, tx));
2796 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2797 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2798 hds->ds_object, tx));
2799
2800 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2801 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2802 origin_head->ds_object, tx));
2803 if (dd->dd_phys->dd_clones == 0) {
2804 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2805 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2806 }
2807 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2808 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2809
2810 }
2811
2812 /* move snapshots to this dir */
2813 for (snap = list_head(&pa->shared_snaps); snap;
2814 snap = list_next(&pa->shared_snaps, snap)) {
2815 dsl_dataset_t *ds = snap->ds;
2816
2817 /* unregister props as dsl_dir is changing */
2818 if (ds->ds_objset) {
2819 dmu_objset_evict(ds->ds_objset);
2820 ds->ds_objset = NULL;
2821 }
2822 /* move snap name entry */
2823 VERIFY(0 == dsl_dataset_get_snapname(ds));
2824 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2825 ds->ds_snapname, tx));
2826 VERIFY(0 == zap_add(dp->dp_meta_objset,
2827 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2828 8, 1, &ds->ds_object, tx));
2829
2830 /* change containing dsl_dir */
2831 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2832 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2833 ds->ds_phys->ds_dir_obj = dd->dd_object;
2834 ASSERT3P(ds->ds_dir, ==, odd);
2835 dsl_dir_close(ds->ds_dir, ds);
2836 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2837 NULL, ds, &ds->ds_dir));
2838
2839 /* move any clone references */
2840 if (ds->ds_phys->ds_next_clones_obj &&
2841 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2842 zap_cursor_t zc;
2843 zap_attribute_t za;
2844
2845 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2846 ds->ds_phys->ds_next_clones_obj);
2847 zap_cursor_retrieve(&zc, &za) == 0;
2848 zap_cursor_advance(&zc)) {
2849 dsl_dataset_t *cnds;
2850 uint64_t o;
2851
2852 if (za.za_first_integer == oldnext_obj) {
2853 /*
2854 * We've already moved the
2855 * origin's reference.
2856 */
2857 continue;
2858 }
2859
2860 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2861 za.za_first_integer, FTAG, &cnds));
2862 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2863
2864 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2865 odd->dd_phys->dd_clones, o, tx), ==, 0);
2866 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2867 dd->dd_phys->dd_clones, o, tx), ==, 0);
2868 dsl_dataset_rele(cnds, FTAG);
2869 }
2870 zap_cursor_fini(&zc);
2871 }
2872
2873 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2874 }
2875
2876 /*
2877 * Change space accounting.
2878 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2879 * both be valid, or both be 0 (resulting in delta == 0). This
2880 * is true for each of {clone,origin} independently.
2881 */
2882
2883 delta = pa->cloneusedsnap -
2884 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2885 ASSERT3S(delta, >=, 0);
2886 ASSERT3U(pa->used, >=, delta);
2887 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2888 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2889 pa->used - delta, pa->comp, pa->uncomp, tx);
2890
2891 delta = pa->originusedsnap -
2892 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2893 ASSERT3S(delta, <=, 0);
2894 ASSERT3U(pa->used, >=, -delta);
2895 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2896 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2897 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2898
2899 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2900
2901 /* log history record */
2902 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2903 "dataset = %llu", hds->ds_object);
2904
2905 dsl_dir_close(odd, FTAG);
2906 }
2907
2908 static char *snaplist_tag = "snaplist";
2909 /*
2910 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2911 * (exclusive) and last_obj (inclusive). The list will be in reverse
2912 * order (last_obj will be the list_head()). If first_obj == 0, do all
2913 * snapshots back to this dataset's origin.
2914 */
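/*
 * E.g., for a chain s1 -> s2 -> s3 (s3 newest), calling with
 * first_obj = s1 and last_obj = s3 yields the list (s3, s2):
 * we walk ds_prev_snap_obj back from s3, appending at the tail,
 * and stop upon reaching s1, which is excluded.
 */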
2915 static int
2916 snaplist_make(dsl_pool_t *dp, boolean_t own,
2917 uint64_t first_obj, uint64_t last_obj, list_t *l)
2918 {
2919 uint64_t obj = last_obj;
2920
2921 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2922
2923 list_create(l, sizeof (struct promotenode),
2924 offsetof(struct promotenode, link));
2925
2926 while (obj != first_obj) {
2927 dsl_dataset_t *ds;
2928 struct promotenode *snap;
2929 int err;
2930
2931 if (own) {
2932 err = dsl_dataset_own_obj(dp, obj,
2933 0, snaplist_tag, &ds);
2934 if (err == 0)
2935 dsl_dataset_make_exclusive(ds, snaplist_tag);
2936 } else {
2937 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2938 }
2939 if (err == ENOENT) {
2940 /* lost race with snapshot destroy */
2941 struct promotenode *last = list_tail(l);
2942 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2943 obj = last->ds->ds_phys->ds_prev_snap_obj;
2944 continue;
2945 } else if (err) {
2946 return (err);
2947 }
2948
2949 if (first_obj == 0)
2950 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2951
2952 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2953 snap->ds = ds;
2954 list_insert_tail(l, snap);
2955 obj = ds->ds_phys->ds_prev_snap_obj;
2956 }
2957
2958 return (0);
2959 }
2960
2961 static int
2962 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2963 {
2964 struct promotenode *snap;
2965
2966 *spacep = 0;
2967 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2968 uint64_t used, comp, uncomp;
2969 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2970 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2971 *spacep += used;
2972 }
2973 return (0);
2974 }
2975
2976 static void
2977 snaplist_destroy(list_t *l, boolean_t own)
2978 {
2979 struct promotenode *snap;
2980
2981 if (!l || !list_link_active(&l->list_head))
2982 return;
2983
2984 while ((snap = list_tail(l)) != NULL) {
2985 list_remove(l, snap);
2986 if (own)
2987 dsl_dataset_disown(snap->ds, snaplist_tag);
2988 else
2989 dsl_dataset_rele(snap->ds, snaplist_tag);
2990 kmem_free(snap, sizeof (struct promotenode));
2991 }
2992 list_destroy(l);
2993 }
2994
2995 /*
2996 * Promote a clone. Nomenclature note:
2997 * "clone" or "cds": the original clone which is being promoted
2998 * "origin" or "ods": the snapshot which is originally clone's origin
2999 * "origin head" or "ohds": the dataset which is the head
3000 * (filesystem/volume) for the origin
3001 * "origin origin": the origin of the origin's filesystem (typically
3002 * NULL, indicating that the clone is not a clone of a clone).
3003 */
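/*
 * Sketch of a simple promotion with one shared snapshot:
 *
 * before: ohds --- ods <-- cds (cds was cloned from ods)
 * after: cds --- ods <-- ohds (ohds is now the clone)
 *
 * The snapshots up to and including ods move under cds's dsl_dir.
 */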
3004 int
3005 dsl_dataset_promote(const char *name, char *conflsnap)
3006 {
3007 dsl_dataset_t *ds;
3008 dsl_dir_t *dd;
3009 dsl_pool_t *dp;
3010 dmu_object_info_t doi;
3011 struct promotearg pa;
3012 struct promotenode *snap;
3013 int err;
3014
3015 bzero(&pa, sizeof (struct promotearg));
3016 err = dsl_dataset_hold(name, FTAG, &ds);
3017 if (err)
3018 return (err);
3019 dd = ds->ds_dir;
3020 dp = dd->dd_pool;
3021
3022 err = dmu_object_info(dp->dp_meta_objset,
3023 ds->ds_phys->ds_snapnames_zapobj, &doi);
3024 if (err) {
3025 dsl_dataset_rele(ds, FTAG);
3026 return (err);
3027 }
3028
3029 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3030 dsl_dataset_rele(ds, FTAG);
3031 return (EINVAL);
3032 }
3033
3034 /*
3035 * We are going to inherit all the snapshots taken before our
3036 * origin (i.e., our new origin will be our parent's origin).
3037 * Take ownership of them so that we can rename them into our
3038 * namespace.
3039 */
3040 rw_enter(&dp->dp_config_rwlock, RW_READER);
3041
3042 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3043 &pa.shared_snaps);
3044 if (err != 0)
3045 goto out;
3046
3047 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3048 if (err != 0)
3049 goto out;
3050
3051 snap = list_head(&pa.shared_snaps);
3052 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3053 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3054 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3055 if (err != 0)
3056 goto out;
3057
3058 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3059 err = dsl_dataset_hold_obj(dp,
3060 snap->ds->ds_dir->dd_phys->dd_origin_obj,
3061 FTAG, &pa.origin_origin);
3062 if (err != 0)
3063 goto out;
3064 }
3065
3066 out:
3067 rw_exit(&dp->dp_config_rwlock);
3068
3069 /*
3070 * Add in 128x the snapnames zapobj size, since we will be moving
3071 * a bunch of snapnames to the promoted ds, and dirtying their
3072 * bonus buffers.
3073 */
3074 if (err == 0) {
3075 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3076 dsl_dataset_promote_sync, ds, &pa,
3077 2 + 2 * doi.doi_physical_blocks_512);
3078 if (err && pa.err_ds && conflsnap)
3079 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3080 }
3081
3082 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3083 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3084 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3085 if (pa.origin_origin)
3086 dsl_dataset_rele(pa.origin_origin, FTAG);
3087 dsl_dataset_rele(ds, FTAG);
3088 return (err);
3089 }
3090
3091 struct cloneswaparg {
3092 dsl_dataset_t *cds; /* clone dataset */
3093 dsl_dataset_t *ohds; /* origin's head dataset */
3094 boolean_t force;
3095 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3096 };
3097
3098 /* ARGSUSED */
3099 static int
3100 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3101 {
3102 struct cloneswaparg *csa = arg1;
3103
3104 /* they should both be heads */
3105 if (dsl_dataset_is_snapshot(csa->cds) ||
3106 dsl_dataset_is_snapshot(csa->ohds))
3107 return (EINVAL);
3108
3109 /* the branch point should be just before them */
3110 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3111 return (EINVAL);
3112
3113 /* cds should be the clone (unless they are unrelated) */
3114 if (csa->cds->ds_prev != NULL &&
3115 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3116 csa->ohds->ds_object !=
3117 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3118 return (EINVAL);
3119
3120 /* the clone should be a child of the origin */
3121 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3122 return (EINVAL);
3123
3124 /* ohds shouldn't be modified unless 'force' */
3125 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3126 return (ETXTBSY);
3127
3128 /* adjust amount of any unconsumed refreservation */
3129 csa->unused_refres_delta =
3130 (int64_t)MIN(csa->ohds->ds_reserved,
3131 csa->ohds->ds_phys->ds_unique_bytes) -
3132 (int64_t)MIN(csa->ohds->ds_reserved,
3133 csa->cds->ds_phys->ds_unique_bytes);
3134
3135 if (csa->unused_refres_delta > 0 &&
3136 csa->unused_refres_delta >
3137 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3138 return (ENOSPC);
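/*
 * Example with invented sizes: ohds reserved = 10M,
 * ohds unique = 8M, cds unique = 3M.  Then
 * unused_refres_delta = MIN(10M, 8M) - MIN(10M, 3M) = 5M:
 * after the swap 5M more of the reservation is unconsumed,
 * so 5M of additional space must be available.
 */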
3139
3140 if (csa->ohds->ds_quota != 0 &&
3141 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3142 return (EDQUOT);
3143
3144 return (0);
3145 }
3146
3147 /* ARGSUSED */
3148 static void
3149 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3150 {
3151 struct cloneswaparg *csa = arg1;
3152 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3153
3154 ASSERT(csa->cds->ds_reserved == 0);
3155 ASSERT(csa->ohds->ds_quota == 0 ||
3156 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3157
3158 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3159 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3160
3161 if (csa->cds->ds_objset != NULL) {
3162 dmu_objset_evict(csa->cds->ds_objset);
3163 csa->cds->ds_objset = NULL;
3164 }
3165
3166 if (csa->ohds->ds_objset != NULL) {
3167 dmu_objset_evict(csa->ohds->ds_objset);
3168 csa->ohds->ds_objset = NULL;
3169 }
3170
3171 /*
3172 * Reset origin's unique bytes, if it exists.
3173 */
3174 if (csa->cds->ds_prev) {
3175 dsl_dataset_t *origin = csa->cds->ds_prev;
3176 uint64_t comp, uncomp;
3177
3178 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3179 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3180 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3181 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3182 }
3183
3184 /* swap blkptrs */
3185 {
3186 blkptr_t tmp;
3187 tmp = csa->ohds->ds_phys->ds_bp;
3188 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3189 csa->cds->ds_phys->ds_bp = tmp;
3190 }
3191
3192 /* set dd_*_bytes */
3193 {
3194 int64_t dused, dcomp, duncomp;
3195 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3196 uint64_t odl_used, odl_comp, odl_uncomp;
3197
3198 ASSERT3U(csa->cds->ds_dir->dd_phys->
3199 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3200
3201 dsl_deadlist_space(&csa->cds->ds_deadlist,
3202 &cdl_used, &cdl_comp, &cdl_uncomp);
3203 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3204 &odl_used, &odl_comp, &odl_uncomp);
3205
3206 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3207 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3208 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3209 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3210 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3211 cdl_uncomp -
3212 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3213
3214 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3215 dused, dcomp, duncomp, tx);
3216 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3217 -dused, -dcomp, -duncomp, tx);
3218
3219 /*
3220 * The difference in the space used by snapshots is the
3221 * difference in snapshot space due to the head's
3222 * deadlist (since that's the only thing that's
3223 * changing that affects the snapused).
3224 */
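/*
 * Say (invented figures) the clone's deadlist holds 500M born
 * after dd_origin_txg and the head's holds 200M: the swap then
 * shifts 500M - 200M = 300M from DD_USED_HEAD to DD_USED_SNAP.
 */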
3225 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3226 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3227 &cdl_used, &cdl_comp, &cdl_uncomp);
3228 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3229 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3230 &odl_used, &odl_comp, &odl_uncomp);
3231 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3232 DD_USED_HEAD, DD_USED_SNAP, tx);
3233 }
3234
3235 /* swap ds_*_bytes */
3236 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3237 csa->cds->ds_phys->ds_used_bytes);
3238 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3239 csa->cds->ds_phys->ds_compressed_bytes);
3240 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3241 csa->cds->ds_phys->ds_uncompressed_bytes);
3242 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3243 csa->cds->ds_phys->ds_unique_bytes);
3244
3245 /* apply any parent delta for change in unconsumed refreservation */
3246 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3247 csa->unused_refres_delta, 0, 0, tx);
3248
3249 /*
3250 * Swap deadlists.
3251 */
3252 dsl_deadlist_close(&csa->cds->ds_deadlist);
3253 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3254 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3255 csa->cds->ds_phys->ds_deadlist_obj);
3256 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3257 csa->cds->ds_phys->ds_deadlist_obj);
3258 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3259 csa->ohds->ds_phys->ds_deadlist_obj);
3260
3261 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3262 }
3263
3264 /*
3265 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3266 * recv" into an existing fs to swizzle the file system to the new
3267 * version, and by "zfs rollback". Can also be used to swap two
3268 * independent head datasets if neither has any snapshots.
3269 */
3270 int
3271 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3272 boolean_t force)
3273 {
3274 struct cloneswaparg csa;
3275 int error;
3276
3277 ASSERT(clone->ds_owner);
3278 ASSERT(origin_head->ds_owner);
3279 retry:
3280 /*
3281 * Need exclusive access for the swap. If we're swapping these
3282 * datasets back after an error, we already hold the locks.
3283 */
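/*
 * The enter/tryenter dance below avoids deadlocking against a
 * thread acquiring the same two rwlocks in the opposite order:
 * if the second lock cannot be taken, drop the first and retry
 * with the acquisition order reversed.
 */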
3284 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3285 rw_enter(&clone->ds_rwlock, RW_WRITER);
3286 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3287 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3288 rw_exit(&clone->ds_rwlock);
3289 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3290 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3291 rw_exit(&origin_head->ds_rwlock);
3292 goto retry;
3293 }
3294 }
3295 csa.cds = clone;
3296 csa.ohds = origin_head;
3297 csa.force = force;
3298 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3299 dsl_dataset_clone_swap_check,
3300 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3301 return (error);
3302 }
3303
3304 /*
3305 * Given a pool name and a dataset object number in that pool,
3306 * return the name of that dataset.
3307 */
3308 int
3309 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3310 {
3311 spa_t *spa;
3312 dsl_pool_t *dp;
3313 dsl_dataset_t *ds;
3314 int error;
3315
3316 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3317 return (error);
3318 dp = spa_get_dsl(spa);
3319 rw_enter(&dp->dp_config_rwlock, RW_READER);
3320 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3321 dsl_dataset_name(ds, buf);
3322 dsl_dataset_rele(ds, FTAG);
3323 }
3324 rw_exit(&dp->dp_config_rwlock);
3325 spa_close(spa, FTAG);
3326
3327 return (error);
3328 }
3329
3330 int
3331 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3332 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3333 {
3334 int error = 0;
3335
3336 ASSERT3S(asize, >, 0);
3337
3338 /*
3339 * *ref_rsrv is the portion of asize that will come from any
3340 * unconsumed refreservation space.
3341 */
3342 *ref_rsrv = 0;
3343
3344 mutex_enter(&ds->ds_lock);
3345 /*
3346 * Make a space adjustment for reserved bytes.
3347 */
3348 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3349 ASSERT3U(*used, >=,
3350 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3351 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3352 *ref_rsrv =
3353 asize - MIN(asize, parent_delta(ds, asize + inflight));
3354 }
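/*
 * Illustration (hypothetical values): reserved = 10M with
 * unique_bytes = 4M leaves 6M of unconsumed reservation, which
 * is backed out of *used; *ref_rsrv is then the part of asize
 * that parent_delta() says the reservation already absorbs.
 */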
3355
3356 if (!check_quota || ds->ds_quota == 0) {
3357 mutex_exit(&ds->ds_lock);
3358 return (0);
3359 }
3360 /*
3361 * If they are requesting more space, and our current estimate
3362 * is over quota, they get to try again unless the actual
3363 * on-disk is over quota and there are no pending changes (which
3364 * may free up space for us).
3365 */
3366 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3367 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3368 error = ERESTART;
3369 else
3370 error = EDQUOT;
3371
3372 DMU_TX_STAT_BUMP(dmu_tx_quota);
3373 }
3374 mutex_exit(&ds->ds_lock);
3375
3376 return (error);
3377 }
3378
3379 /* ARGSUSED */
3380 static int
3381 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3382 {
3383 dsl_dataset_t *ds = arg1;
3384 dsl_prop_setarg_t *psa = arg2;
3385 int err;
3386
3387 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3388 return (ENOTSUP);
3389
3390 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3391 return (err);
3392
3393 if (psa->psa_effective_value == 0)
3394 return (0);
3395
3396 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3397 psa->psa_effective_value < ds->ds_reserved)
3398 return (ENOSPC);
3399
3400 return (0);
3401 }
3402
3403 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3404
3405 void
3406 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3407 {
3408 dsl_dataset_t *ds = arg1;
3409 dsl_prop_setarg_t *psa = arg2;
3410 uint64_t effective_value = psa->psa_effective_value;
3411
3412 dsl_prop_set_sync(ds, psa, tx);
3413 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3414
3415 if (ds->ds_quota != effective_value) {
3416 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3417 ds->ds_quota = effective_value;
3418 }
3419 }
3420
3421 int
3422 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3423 {
3424 dsl_dataset_t *ds;
3425 dsl_prop_setarg_t psa;
3426 int err;
3427
3428 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3429
3430 err = dsl_dataset_hold(dsname, FTAG, &ds);
3431 if (err)
3432 return (err);
3433
3434 /*
3435 * If someone removes a file, then tries to set the quota, we
3436 * want to make sure the file freeing takes effect.
3437 */
3438 txg_wait_open(ds->ds_dir->dd_pool, 0);
3439
3440 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3441 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3442 ds, &psa, 0);
3443
3444 dsl_dataset_rele(ds, FTAG);
3445 return (err);
3446 }
3447
3448 static int
3449 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3450 {
3451 dsl_dataset_t *ds = arg1;
3452 dsl_prop_setarg_t *psa = arg2;
3453 uint64_t effective_value;
3454 uint64_t unique;
3455 int err;
3456
3457 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3458 SPA_VERSION_REFRESERVATION)
3459 return (ENOTSUP);
3460
3461 if (dsl_dataset_is_snapshot(ds))
3462 return (EINVAL);
3463
3464 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3465 return (err);
3466
3467 effective_value = psa->psa_effective_value;
3468
3469 /*
3470 * If we are doing the preliminary check in open context, the
3471 * space estimates may be inaccurate.
3472 */
3473 if (!dmu_tx_is_syncing(tx))
3474 return (0);
3475
3476 mutex_enter(&ds->ds_lock);
3477 if (!DS_UNIQUE_IS_ACCURATE(ds))
3478 dsl_dataset_recalc_head_uniq(ds);
3479 unique = ds->ds_phys->ds_unique_bytes;
3480 mutex_exit(&ds->ds_lock);
3481
3482 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3483 uint64_t delta = MAX(unique, effective_value) -
3484 MAX(unique, ds->ds_reserved);
3485
3486 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3487 return (ENOSPC);
3488 if (ds->ds_quota > 0 &&
3489 effective_value > ds->ds_quota)
3490 return (ENOSPC);
3491 }
3492
3493 return (0);
3494 }
3495
3496 static void
3497 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3498 {
3499 dsl_dataset_t *ds = arg1;
3500 dsl_prop_setarg_t *psa = arg2;
3501 uint64_t effective_value = psa->psa_effective_value;
3502 uint64_t unique;
3503 int64_t delta;
3504
3505 dsl_prop_set_sync(ds, psa, tx);
3506 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3507
3508 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3509
3510 mutex_enter(&ds->ds_dir->dd_lock);
3511 mutex_enter(&ds->ds_lock);
3512 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3513 unique = ds->ds_phys->ds_unique_bytes;
3514 delta = MAX(0, (int64_t)(effective_value - unique)) -
3515 MAX(0, (int64_t)(ds->ds_reserved - unique));
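/*
 * E.g. (invented numbers): unique = 2M, old ds_reserved = 5M,
 * new effective_value = 9M gives
 * delta = MAX(0, 7M) - MAX(0, 3M) = 4M more DD_USED_REFRSRV.
 */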
3516 ds->ds_reserved = effective_value;
3517 mutex_exit(&ds->ds_lock);
3518
3519 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3520 mutex_exit(&ds->ds_dir->dd_lock);
3521 }
3522
3523 int
3524 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3525 uint64_t reservation)
3526 {
3527 dsl_dataset_t *ds;
3528 dsl_prop_setarg_t psa;
3529 int err;
3530
3531 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3532 &reservation);
3533
3534 err = dsl_dataset_hold(dsname, FTAG, &ds);
3535 if (err)
3536 return (err);
3537
3538 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3539 dsl_dataset_set_reservation_check,
3540 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3541
3542 dsl_dataset_rele(ds, FTAG);
3543 return (err);
3544 }
3545
3546 typedef struct zfs_hold_cleanup_arg {
3547 dsl_pool_t *dp;
3548 uint64_t dsobj;
3549 char htag[MAXNAMELEN];
3550 } zfs_hold_cleanup_arg_t;
3551
3552 static void
3553 dsl_dataset_user_release_onexit(void *arg)
3554 {
3555 zfs_hold_cleanup_arg_t *ca = arg;
3556
3557 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3558 B_TRUE);
3559 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3560 }
3561
3562 void
3563 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3564 minor_t minor)
3565 {
3566 zfs_hold_cleanup_arg_t *ca;
3567
3568 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3569 ca->dp = ds->ds_dir->dd_pool;
3570 ca->dsobj = ds->ds_object;
3571 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3572 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3573 dsl_dataset_user_release_onexit, ca, NULL));
3574 }
3575
3576 /*
3577 * If you add new checks here, you may need to add
3578 * additional checks to the "temporary" case in
3579 * snapshot_check() in dmu_objset.c.
3580 */
3581 static int
3582 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3583 {
3584 dsl_dataset_t *ds = arg1;
3585 struct dsl_ds_holdarg *ha = arg2;
3586 char *htag = ha->htag;
3587 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3588 int error = 0;
3589
3590 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3591 return (ENOTSUP);
3592
3593 if (!dsl_dataset_is_snapshot(ds))
3594 return (EINVAL);
3595
3596 /* tags must be unique */
3597 mutex_enter(&ds->ds_lock);
3598 if (ds->ds_phys->ds_userrefs_obj) {
3599 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3600 8, 1, tx);
3601 if (error == 0)
3602 error = EEXIST;
3603 else if (error == ENOENT)
3604 error = 0;
3605 }
3606 mutex_exit(&ds->ds_lock);
3607
3608 if (error == 0 && ha->temphold &&
3609 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3610 error = E2BIG;
3611
3612 return (error);
3613 }
3614
3615 void
3616 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3617 {
3618 dsl_dataset_t *ds = arg1;
3619 struct dsl_ds_holdarg *ha = arg2;
3620 char *htag = ha->htag;
3621 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3622 objset_t *mos = dp->dp_meta_objset;
3623 uint64_t now = gethrestime_sec();
3624 uint64_t zapobj;
3625
3626 mutex_enter(&ds->ds_lock);
3627 if (ds->ds_phys->ds_userrefs_obj == 0) {
3628 /*
3629 * This is the first user hold for this dataset. Create
3630 * the userrefs zap object.
3631 */
3632 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3633 zapobj = ds->ds_phys->ds_userrefs_obj =
3634 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3635 } else {
3636 zapobj = ds->ds_phys->ds_userrefs_obj;
3637 }
3638 ds->ds_userrefs++;
3639 mutex_exit(&ds->ds_lock);
3640
3641 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3642
3643 if (ha->temphold) {
3644 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3645 htag, &now, tx));
3646 }
3647
3648 spa_history_log_internal(LOG_DS_USER_HOLD,
3649 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3650 (int)ha->temphold, ds->ds_object);
3651 }
3652
3653 static int
3654 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3655 {
3656 struct dsl_ds_holdarg *ha = arg;
3657 dsl_dataset_t *ds;
3658 int error;
3659 char *name;
3660
3661 /* alloc a buffer to hold dsname@snapname plus the terminating NUL */
3662 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3663 error = dsl_dataset_hold(name, ha->dstg, &ds);
3664 strfree(name);
3665 if (error == 0) {
3666 ha->gotone = B_TRUE;
3667 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3668 dsl_dataset_user_hold_sync, ds, ha, 0);
3669 } else if (error == ENOENT && ha->recursive) {
3670 error = 0;
3671 } else {
3672 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3673 }
3674 return (error);
3675 }
3676
3677 int
3678 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3679 boolean_t temphold)
3680 {
3681 struct dsl_ds_holdarg *ha;
3682 int error;
3683
3684 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3685 ha->htag = htag;
3686 ha->temphold = temphold;
3687 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3688 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3689 ds, ha, 0);
3690 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3691
3692 return (error);
3693 }
3694
3695 int
3696 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3697 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3698 {
3699 struct dsl_ds_holdarg *ha;
3700 dsl_sync_task_t *dst;
3701 spa_t *spa;
3702 int error;
3703 minor_t minor = 0;
3704
3705 if (cleanup_fd != -1) {
3706 /* Currently we only support cleanup-on-exit of tempholds. */
3707 if (!temphold)
3708 return (EINVAL);
3709 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3710 if (error)
3711 return (error);
3712 }
3713
3714 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3715
3716 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3717
3718 error = spa_open(dsname, &spa, FTAG);
3719 if (error) {
3720 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3721 if (cleanup_fd != -1)
3722 zfs_onexit_fd_rele(cleanup_fd);
3723 return (error);
3724 }
3725
3726 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3727 ha->htag = htag;
3728 ha->snapname = snapname;
3729 ha->recursive = recursive;
3730 ha->temphold = temphold;
3731
3732 if (recursive) {
3733 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3734 ha, DS_FIND_CHILDREN);
3735 } else {
3736 error = dsl_dataset_user_hold_one(dsname, ha);
3737 }
3738 if (error == 0)
3739 error = dsl_sync_task_group_wait(ha->dstg);
3740
3741 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3742 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3743 dsl_dataset_t *ds = dst->dst_arg1;
3744
3745 if (dst->dst_err) {
3746 dsl_dataset_name(ds, ha->failed);
3747 *strchr(ha->failed, '@') = '\0';
3748 } else if (error == 0 && minor != 0 && temphold) {
3749 /*
3750 * If this hold is to be released upon process exit,
3751 * register that action now.
3752 */
3753 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3754 }
3755 dsl_dataset_rele(ds, ha->dstg);
3756 }
3757
3758 if (error == 0 && recursive && !ha->gotone)
3759 error = ENOENT;
3760
3761 if (error)
3762 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3763
3764 dsl_sync_task_group_destroy(ha->dstg);
3765
3766 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3767 spa_close(spa, FTAG);
3768 if (cleanup_fd != -1)
3769 zfs_onexit_fd_rele(cleanup_fd);
3770 return (error);
3771 }
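
/*
* Illustrative sketch (assumed values): this is roughly what a
* "zfs hold -r mytag pool/fs@snap" request boils down to by the time
* it reaches the function above. A plain user hold is not a temphold
* and passes no cleanup fd.
*
*     error = dsl_dataset_user_hold("pool/fs", "snap", "mytag",
*         B_TRUE, B_FALSE, -1);
*/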
3772
3773 struct dsl_ds_releasearg {
3774 dsl_dataset_t *ds;
3775 const char *htag;
3776 boolean_t own; /* do we own or just hold ds? */
3777 };
3778
3779 static int
3780 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3781 boolean_t *might_destroy)
3782 {
3783 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3784 uint64_t zapobj;
3785 uint64_t tmp;
3786 int error;
3787
3788 *might_destroy = B_FALSE;
3789
3790 mutex_enter(&ds->ds_lock);
3791 zapobj = ds->ds_phys->ds_userrefs_obj;
3792 if (zapobj == 0) {
3793 /* The tag can't possibly exist */
3794 mutex_exit(&ds->ds_lock);
3795 return (ESRCH);
3796 }
3797
3798 /* Make sure the tag exists */
3799 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3800 if (error) {
3801 mutex_exit(&ds->ds_lock);
3802 if (error == ENOENT)
3803 error = ESRCH;
3804 return (error);
3805 }
3806
3807 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3808 DS_IS_DEFER_DESTROY(ds))
3809 *might_destroy = B_TRUE;
3810
3811 mutex_exit(&ds->ds_lock);
3812 return (0);
3813 }
3814
3815 static int
3816 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3817 {
3818 struct dsl_ds_releasearg *ra = arg1;
3819 dsl_dataset_t *ds = ra->ds;
3820 boolean_t might_destroy;
3821 int error;
3822
3823 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3824 return (ENOTSUP);
3825
3826 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3827 if (error)
3828 return (error);
3829
3830 if (might_destroy) {
3831 struct dsl_ds_destroyarg dsda = {0};
3832
3833 if (dmu_tx_is_syncing(tx)) {
3834 /*
3835 * If we're not prepared to remove the snapshot,
3836 * we can't allow the release to happen right now.
3837 */
3838 if (!ra->own)
3839 return (EBUSY);
3840 }
3841 dsda.ds = ds;
3842 dsda.releasing = B_TRUE;
3843 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3844 }
3845
3846 return (0);
3847 }
3848
3849 static void
3850 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3851 {
3852 struct dsl_ds_releasearg *ra = arg1;
3853 dsl_dataset_t *ds = ra->ds;
3854 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3855 objset_t *mos = dp->dp_meta_objset;
3856 uint64_t zapobj;
3857 uint64_t dsobj = ds->ds_object;
3858 uint64_t refs;
3859 int error;
3860
3861 mutex_enter(&ds->ds_lock);
3862 ds->ds_userrefs--;
3863 refs = ds->ds_userrefs;
3864 mutex_exit(&ds->ds_lock);
3865 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3866 VERIFY(error == 0 || error == ENOENT);
3867 zapobj = ds->ds_phys->ds_userrefs_obj;
3868 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3869
3870 spa_history_log_internal(LOG_DS_USER_RELEASE,
3871 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3872 ra->htag, (longlong_t)refs, (u_longlong_t)dsobj);
3873
3874 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3875 DS_IS_DEFER_DESTROY(ds)) {
3876 struct dsl_ds_destroyarg dsda = {0};
3877
3878 ASSERT(ra->own);
3879 dsda.ds = ds;
3880 dsda.releasing = B_TRUE;
3881 /* We already did the destroy_check */
3882 dsl_dataset_destroy_sync(&dsda, tag, tx);
3883 }
3884 }
3885
3886 static int
3887 dsl_dataset_user_release_one(const char *dsname, void *arg)
3888 {
3889 struct dsl_ds_holdarg *ha = arg;
3890 struct dsl_ds_releasearg *ra;
3891 dsl_dataset_t *ds;
3892 int error;
3893 void *dtag = ha->dstg;
3894 char *name;
3895 boolean_t own = B_FALSE;
3896 boolean_t might_destroy;
3897
3898 /* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3899 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3900 error = dsl_dataset_hold(name, dtag, &ds);
3901 strfree(name);
3902 if (error == ENOENT && ha->recursive)
3903 return (0);
3904 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3905 if (error)
3906 return (error);
3907
3908 ha->gotone = B_TRUE;
3909
3910 ASSERT(dsl_dataset_is_snapshot(ds));
3911
3912 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3913 if (error) {
3914 dsl_dataset_rele(ds, dtag);
3915 return (error);
3916 }
3917
3918 if (might_destroy) {
3919 #ifdef _KERNEL
3920 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3921 error = zfs_unmount_snap(name, NULL);
3922 strfree(name);
3923 if (error) {
3924 dsl_dataset_rele(ds, dtag);
3925 return (error);
3926 }
3927 #endif
3928 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3929 dsl_dataset_rele(ds, dtag);
3930 return (EBUSY);
3931 } else {
3932 own = B_TRUE;
3933 dsl_dataset_make_exclusive(ds, dtag);
3934 }
3935 }
3936
3937 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3938 ra->ds = ds;
3939 ra->htag = ha->htag;
3940 ra->own = own;
3941 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3942 dsl_dataset_user_release_sync, ra, dtag, 0);
3943
3944 return (0);
3945 }
3946
3947 int
3948 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3949 boolean_t recursive)
3950 {
3951 struct dsl_ds_holdarg *ha;
3952 dsl_sync_task_t *dst;
3953 spa_t *spa;
3954 int error;
3955
3956 top:
3957 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3958
3959 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3960
3961 error = spa_open(dsname, &spa, FTAG);
3962 if (error) {
3963 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3964 return (error);
3965 }
3966
3967 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3968 ha->htag = htag;
3969 ha->snapname = snapname;
3970 ha->recursive = recursive;
3971 if (recursive) {
3972 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3973 ha, DS_FIND_CHILDREN);
3974 } else {
3975 error = dsl_dataset_user_release_one(dsname, ha);
3976 }
3977 if (error == 0)
3978 error = dsl_sync_task_group_wait(ha->dstg);
3979
3980 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3981 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3982 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3983 dsl_dataset_t *ds = ra->ds;
3984
3985 if (dst->dst_err)
3986 dsl_dataset_name(ds, ha->failed);
3987
3988 if (ra->own)
3989 dsl_dataset_disown(ds, ha->dstg);
3990 else
3991 dsl_dataset_rele(ds, ha->dstg);
3992
3993 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3994 }
3995
3996 if (error == 0 && recursive && !ha->gotone)
3997 error = ENOENT;
3998
3999 if (error && error != EBUSY)
4000 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
4001
4002 dsl_sync_task_group_destroy(ha->dstg);
4003 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4004 spa_close(spa, FTAG);
4005
4006 /*
4007 * We can get EBUSY if we were racing with deferred destroy and
4008 * dsl_dataset_user_release_check() hadn't done the necessary
4009 * open context setup. We can also get EBUSY if we're racing
4010 * with destroy and that thread is the ds_owner. Either way
4011 * the busy condition should be transient, and we should retry
4012 * the release operation.
4013 */
4014 if (error == EBUSY)
4015 goto top;
4016
4017 return (error);
4018 }
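
/*
* Illustrative sketch (assumed values): the matching release for the
* hold example above, roughly what "zfs release -r mytag pool/fs@snap"
* boils down to. Note that the function already retries internally on
* EBUSY, so callers do not need their own retry loop.
*
*     error = dsl_dataset_user_release("pool/fs", "snap", "mytag",
*         B_TRUE);
*/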
4019
4020 /*
4021 * Called at spa_load time (with retry == B_FALSE) to release a stale
4022 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4023 */
4024 int
4025 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4026 boolean_t retry)
4027 {
4028 dsl_dataset_t *ds;
4029 char *snap;
4030 char *name;
4031 int namelen;
4032 int error;
4033
4034 do {
4035 rw_enter(&dp->dp_config_rwlock, RW_READER);
4036 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4037 rw_exit(&dp->dp_config_rwlock);
4038 if (error)
4039 return (error);
4040 namelen = dsl_dataset_namelen(ds) + 1;
4041 name = kmem_alloc(namelen, KM_SLEEP);
4042 dsl_dataset_name(ds, name);
4043 dsl_dataset_rele(ds, FTAG);
4044
4045 snap = strchr(name, '@');
4046 *snap = '\0';
4047 ++snap;
4048 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4049 kmem_free(name, namelen);
4050
4051 /*
4052 * The object can't have been destroyed because we have a hold,
4053 * but it might have been renamed, resulting in ENOENT. Retry
4054 * if we've been requested to do so.
4055 *
4056 * It would be nice if we could use the dsobj all the way
4057 * through and avoid ENOENT entirely. But we might need to
4058 * unmount the snapshot, and there's currently no way to look up
4059 * a vfsp using a ZFS object id.
4060 */
4061 } while ((error == ENOENT) && retry);
4062
4063 return (error);
4064 }
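
/*
* Illustrative sketch (hypothetical call, not part of this file): the
* cleanup registered via dsl_register_onexit_hold_cleanup() above ends
* up doing the equivalent of the following on process exit, with
* retry == B_TRUE so that a concurrent rename (ENOENT) does not leak
* the temporary hold:
*
*     (void) dsl_dataset_user_release_tmp(dp, dsobj, htag, B_TRUE);
*/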
4065
4066 int
4067 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4068 {
4069 dsl_dataset_t *ds;
4070 int err;
4071
4072 err = dsl_dataset_hold(dsname, FTAG, &ds);
4073 if (err)
4074 return (err);
4075
4076 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4077 if (ds->ds_phys->ds_userrefs_obj != 0) {
4078 zap_attribute_t *za;
4079 zap_cursor_t zc;
4080
4081 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4082 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4083 ds->ds_phys->ds_userrefs_obj);
4084 zap_cursor_retrieve(&zc, za) == 0;
4085 zap_cursor_advance(&zc)) {
4086 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4087 za->za_first_integer));
4088 }
4089 zap_cursor_fini(&zc);
4090 kmem_free(za, sizeof (zap_attribute_t));
4091 }
4092 dsl_dataset_rele(ds, FTAG);
4093 return (0);
4094 }
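
/*
* Illustrative sketch (hypothetical caller): walking the nvlist
* returned above; each pair maps a hold tag to the uint64 time at
* which the hold was taken.
*
*     nvlist_t *nvl;
*     nvpair_t *pair = NULL;
*     uint64_t when;
*
*     if (dsl_dataset_get_holds("pool/fs@snap", &nvl) == 0) {
*             while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
*                     VERIFY(0 == nvpair_value_uint64(pair, &when));
*                     dprintf("%s %llu\n", nvpair_name(pair),
*                         (u_longlong_t)when);
*             }
*             nvlist_free(nvl);
*     }
*/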
4095
4096 /*
4097 * Note, this function is used as the callback for dmu_objset_find(). We
4098 * always return 0 so that we will continue to find and process
4099 * inconsistent datasets, even if we encounter an error trying to
4100 * process one of them.
4101 */
4102 /* ARGSUSED */
4103 int
4104 dsl_destroy_inconsistent(const char *dsname, void *arg)
4105 {
4106 dsl_dataset_t *ds;
4107
4108 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4109 if (DS_IS_INCONSISTENT(ds))
4110 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4111 else
4112 dsl_dataset_disown(ds, FTAG);
4113 }
4114 return (0);
4115 }
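
/*
* Illustrative sketch (assumed caller, e.g. at pool import): destroy
* every inconsistent dataset in a pool, mirroring the dmu_objset_find()
* usage seen in dsl_dataset_user_hold() above. Because the callback
* always returns 0, one failed destroy does not stop the walk.
*
*     (void) dmu_objset_find(spa_name(spa),
*         dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
*/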
4116
4118 /*
4119 * Return (in *usedp) the amount of space written in new that is not
4120 * present in oldsnap. new may be a snapshot or the head; oldsnap must
4121 * be a snapshot before new, in new's filesystem (or its origin). If it
4122 * is not, fail and return EINVAL.
4123 *
4124 * The written space is calculated from two components: first, ignoring
4125 * any freed space, we take new's used space minus old's used space.
4126 * Next, we add back the space that was freed between the two snapshots,
4127 * since it reduced new's used space relative to old's. Specifically,
4128 * this is the space that was born before old->ds_creation_txg and freed
4129 * before new (i.e. it is on new's deadlist or a previous deadlist).
4130 *
4131 * space freed                 [---------------------]
4132 * snapshots      ---O-------O--------O-------O------
4133 *                       oldsnap            new
4134 */
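
/*
* Worked example (hypothetical numbers): if new's used space is 150M
* and oldsnap's is 100M, the first component is 50M. If another 30M
* that was born before oldsnap was freed between the two snapshots
* (and so sits on the deadlists scanned below), the space written is
* 50M + 30M = 80M.
*/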
4135 int
4136 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4137 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4138 {
4139 int err = 0;
4140 uint64_t snapobj;
4141 dsl_pool_t *dp = new->ds_dir->dd_pool;
4142
4143 *usedp = 0;
4144 *usedp += new->ds_phys->ds_used_bytes;
4145 *usedp -= oldsnap->ds_phys->ds_used_bytes;
4146
4147 *compp = 0;
4148 *compp += new->ds_phys->ds_compressed_bytes;
4149 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4150
4151 *uncompp = 0;
4152 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4153 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4154
4155 rw_enter(&dp->dp_config_rwlock, RW_READER);
4156 snapobj = new->ds_object;
4157 while (snapobj != oldsnap->ds_object) {
4158 dsl_dataset_t *snap;
4159 uint64_t used, comp, uncomp;
4160
4161 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4162 if (err != 0)
4163 break;
4164
4165 if (snap->ds_phys->ds_prev_snap_txg ==
4166 oldsnap->ds_phys->ds_creation_txg) {
4167 /*
4168 * The blocks in the deadlist cannot be born after
4169 * ds_prev_snap_txg, so get the whole deadlist space,
4170 * which is more efficient (especially for old-format
4171 * deadlists). Unfortunately the deadlist code
4172 * doesn't have enough information to make this
4173 * optimization itself.
4174 */
4175 dsl_deadlist_space(&snap->ds_deadlist,
4176 &used, &comp, &uncomp);
4177 } else {
4178 dsl_deadlist_space_range(&snap->ds_deadlist,
4179 0, oldsnap->ds_phys->ds_creation_txg,
4180 &used, &comp, &uncomp);
4181 }
4182 *usedp += used;
4183 *compp += comp;
4184 *uncompp += uncomp;
4185
4186 /*
4187 * If we get to the beginning of the chain of snapshots
4188 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4189 * was not a snapshot of/before new.
4190 */
4191 snapobj = snap->ds_phys->ds_prev_snap_obj;
4192 dsl_dataset_rele(snap, FTAG);
4193 if (snapobj == 0) {
4194 err = EINVAL;
4195 break;
4196 }
4198 }
4199 rw_exit(&dp->dp_config_rwlock);
4200 return (err);
4201 }
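
/*
* Illustrative sketch (hypothetical caller): with both datasets held,
* this is roughly how a "written since oldsnap" figure would be
* obtained for reporting.
*
*     uint64_t written, comp, uncomp;
*
*     error = dsl_dataset_space_written(oldsnap, new,
*         &written, &comp, &uncomp);
*/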
4202
4203 /*
4204 * Return (in *usedp) the amount of space that will be reclaimed if
4205 * firstsnap, lastsnap, and all snapshots in between are deleted.
4206 *
4207 * blocks that would be freed      [---------------------------]
4208 * snapshots        ---O-------O--------O-------O--------O
4209 *                         firstsnap         lastsnap
4210 *
4211 * This is the set of blocks born after the snap before firstsnap
4212 * (birth > firstsnap->prev_snap_txg) that died before the snap after
4213 * lastsnap (i.e. on lastsnap->ds_next->ds_deadlist or an earlier
4214 * deadlist). We calculate this by iterating over the relevant deadlists
4215 * (from the snap after lastsnap, backward to the snap after firstsnap)
4216 * and summing the space on each that was born after the snap before firstsnap.
4217 */
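
/*
* Worked example (hypothetical numbers): suppose the walk below visits
* three deadlists and finds 10M, 15M and 5M of space that was born
* after the snap before firstsnap. Destroying firstsnap..lastsnap
* would then reclaim 10M + 15M + 5M = 30M.
*/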
4218 int
4219 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4220 dsl_dataset_t *lastsnap,
4221 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4222 {
4223 int err = 0;
4224 uint64_t snapobj;
4225 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4226
4227 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4228 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4229
4230 /*
4231 * Check that the snapshots are in the same dsl_dir, and firstsnap
4232 * is before lastsnap.
4233 */
4234 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4235 firstsnap->ds_phys->ds_creation_txg >
4236 lastsnap->ds_phys->ds_creation_txg)
4237 return (EINVAL);
4238
4239 *usedp = *compp = *uncompp = 0;
4240
4241 rw_enter(&dp->dp_config_rwlock, RW_READER);
4242 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4243 while (snapobj != firstsnap->ds_object) {
4244 dsl_dataset_t *ds;
4245 uint64_t used, comp, uncomp;
4246
4247 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4248 if (err != 0)
4249 break;
4250
4251 dsl_deadlist_space_range(&ds->ds_deadlist,
4252 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4253 &used, &comp, &uncomp);
4254 *usedp += used;
4255 *compp += comp;
4256 *uncompp += uncomp;
4257
4258 snapobj = ds->ds_phys->ds_prev_snap_obj;
4259 ASSERT3U(snapobj, !=, 0);
4260 dsl_dataset_rele(ds, FTAG);
4261 }
4262 rw_exit(&dp->dp_config_rwlock);
4263 return (err);
4264 }
4265
4266 #if defined(_KERNEL) && defined(HAVE_SPL)
4267 EXPORT_SYMBOL(dmu_snapshots_destroy_nvl);
4268 EXPORT_SYMBOL(dsl_dataset_hold);
4269 EXPORT_SYMBOL(dsl_dataset_hold_obj);
4270 EXPORT_SYMBOL(dsl_dataset_own);
4271 EXPORT_SYMBOL(dsl_dataset_own_obj);
4272 EXPORT_SYMBOL(dsl_dataset_name);
4273 EXPORT_SYMBOL(dsl_dataset_rele);
4274 EXPORT_SYMBOL(dsl_dataset_disown);
4275 EXPORT_SYMBOL(dsl_dataset_drop_ref);
4276 EXPORT_SYMBOL(dsl_dataset_tryown);
4277 EXPORT_SYMBOL(dsl_dataset_make_exclusive);
4278 EXPORT_SYMBOL(dsl_dataset_create_sync);
4279 EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
4280 EXPORT_SYMBOL(dsl_dataset_destroy);
4281 EXPORT_SYMBOL(dsl_dataset_destroy_check);
4282 EXPORT_SYMBOL(dsl_dataset_destroy_sync);
4283 EXPORT_SYMBOL(dsl_dataset_snapshot_check);
4284 EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
4285 EXPORT_SYMBOL(dsl_dataset_rename);
4286 EXPORT_SYMBOL(dsl_dataset_promote);
4287 EXPORT_SYMBOL(dsl_dataset_clone_swap);
4288 EXPORT_SYMBOL(dsl_dataset_user_hold);
4289 EXPORT_SYMBOL(dsl_dataset_user_release);
4290 EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
4291 EXPORT_SYMBOL(dsl_dataset_get_holds);
4292 EXPORT_SYMBOL(dsl_dataset_get_blkptr);
4293 EXPORT_SYMBOL(dsl_dataset_set_blkptr);
4294 EXPORT_SYMBOL(dsl_dataset_get_spa);
4295 EXPORT_SYMBOL(dsl_dataset_modified_since_lastsnap);
4296 EXPORT_SYMBOL(dsl_dataset_space_written);
4297 EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
4298 EXPORT_SYMBOL(dsl_dataset_sync);
4299 EXPORT_SYMBOL(dsl_dataset_block_born);
4300 EXPORT_SYMBOL(dsl_dataset_block_kill);
4301 EXPORT_SYMBOL(dsl_dataset_block_freeable);
4302 EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
4303 EXPORT_SYMBOL(dsl_dataset_dirty);
4304 EXPORT_SYMBOL(dsl_dataset_stats);
4305 EXPORT_SYMBOL(dsl_dataset_fast_stat);
4306 EXPORT_SYMBOL(dsl_dataset_space);
4307 EXPORT_SYMBOL(dsl_dataset_fsid_guid);
4308 EXPORT_SYMBOL(dsl_dsobj_to_dsname);
4309 EXPORT_SYMBOL(dsl_dataset_check_quota);
4310 EXPORT_SYMBOL(dsl_dataset_set_quota);
4311 EXPORT_SYMBOL(dsl_dataset_set_quota_sync);
4312 EXPORT_SYMBOL(dsl_dataset_set_reservation);
4313 EXPORT_SYMBOL(dsl_destroy_inconsistent);
4314 #endif