1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 */
26
27 #include <sys/dmu_objset.h>
28 #include <sys/dsl_dataset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_prop.h>
31 #include <sys/dsl_synctask.h>
32 #include <sys/dmu_traverse.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/arc.h>
36 #include <sys/zio.h>
37 #include <sys/zap.h>
38 #include <sys/zfeature.h>
39 #include <sys/unique.h>
40 #include <sys/zfs_context.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/spa.h>
43 #include <sys/zfs_znode.h>
44 #include <sys/zfs_onexit.h>
45 #include <sys/zvol.h>
46 #include <sys/dsl_scan.h>
47 #include <sys/dsl_deadlist.h>
48
49 static char *dsl_reaper = "the grim reaper";
50
51 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
52 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
53 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
54
55 #define SWITCH64(x, y) \
56 { \
57 uint64_t __tmp = (x); \
58 (x) = (y); \
59 (y) = __tmp; \
60 }
61
62 #define DS_REF_MAX (1ULL << 62)
63
64 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
65
66 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
67
68
69 /*
70 * Figure out how much of this delta should be propagated to the dsl_dir
71 * layer. If there's a refreservation, that space has already been
72 * partially accounted for in our ancestors.
73 */
74 static int64_t
75 parent_delta(dsl_dataset_t *ds, int64_t delta)
76 {
77 uint64_t old_bytes, new_bytes;
78
79 if (ds->ds_reserved == 0)
80 return (delta);
81
82 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
83 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
84
85 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
86 return (new_bytes - old_bytes);
87 }
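/*
 * Worked example for parent_delta() (hypothetical sizes): with
 * ds_reserved = 10M and ds_unique_bytes = 8M, a delta of +1M gives
 * old_bytes = MAX(8M, 10M) = 10M and new_bytes = MAX(9M, 10M) = 10M,
 * so nothing propagates -- the charge is absorbed by the
 * refreservation.  A delta of +3M gives new_bytes = 11M, so only the
 * 1M in excess of the reservation is passed on to the dsl_dir layer.
 */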
88
89 void
90 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
91 {
92 int used, compressed, uncompressed;
93 int64_t delta;
94
95 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
96 compressed = BP_GET_PSIZE(bp);
97 uncompressed = BP_GET_UCSIZE(bp);
98
99 dprintf_bp(bp, "ds=%p", ds);
100
101 ASSERT(dmu_tx_is_syncing(tx));
102 /* It could have been compressed away to nothing */
103 if (BP_IS_HOLE(bp))
104 return;
105 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
106 ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
107 if (ds == NULL) {
108 dsl_pool_mos_diduse_space(tx->tx_pool,
109 used, compressed, uncompressed);
110 return;
111 }
112 dmu_buf_will_dirty(ds->ds_dbuf, tx);
113
114 mutex_enter(&ds->ds_dir->dd_lock);
115 mutex_enter(&ds->ds_lock);
116 delta = parent_delta(ds, used);
117 ds->ds_phys->ds_referenced_bytes += used;
118 ds->ds_phys->ds_compressed_bytes += compressed;
119 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
120 ds->ds_phys->ds_unique_bytes += used;
121 mutex_exit(&ds->ds_lock);
122 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
123 compressed, uncompressed, tx);
124 dsl_dir_transfer_space(ds->ds_dir, used - delta,
125 DD_USED_REFRSRV, DD_USED_HEAD, tx);
126 mutex_exit(&ds->ds_dir->dd_lock);
127 }
128
129 int
130 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
131 boolean_t async)
132 {
133 int used, compressed, uncompressed;
134
135 if (BP_IS_HOLE(bp))
136 return (0);
137
138 ASSERT(dmu_tx_is_syncing(tx));
139 ASSERT(bp->blk_birth <= tx->tx_txg);
140
141 used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
142 compressed = BP_GET_PSIZE(bp);
143 uncompressed = BP_GET_UCSIZE(bp);
144
145 ASSERT(used > 0);
146 if (ds == NULL) {
147 dsl_free(tx->tx_pool, tx->tx_txg, bp);
148 dsl_pool_mos_diduse_space(tx->tx_pool,
149 -used, -compressed, -uncompressed);
150 return (used);
151 }
152 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
153
154 ASSERT(!dsl_dataset_is_snapshot(ds));
155 dmu_buf_will_dirty(ds->ds_dbuf, tx);
156
157 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
158 int64_t delta;
159
160 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
161 dsl_free(tx->tx_pool, tx->tx_txg, bp);
162
163 mutex_enter(&ds->ds_dir->dd_lock);
164 mutex_enter(&ds->ds_lock);
165 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
166 !DS_UNIQUE_IS_ACCURATE(ds));
167 delta = parent_delta(ds, -used);
168 ds->ds_phys->ds_unique_bytes -= used;
169 mutex_exit(&ds->ds_lock);
170 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
171 delta, -compressed, -uncompressed, tx);
172 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
173 DD_USED_REFRSRV, DD_USED_HEAD, tx);
174 mutex_exit(&ds->ds_dir->dd_lock);
175 } else {
176 dprintf_bp(bp, "putting on dead list: %s", "");
177 if (async) {
178 /*
179 * We are here as part of zio's write done callback,
180 * which means we're a zio interrupt thread. We can't
181 * call dsl_deadlist_insert() now because it may block
182 * waiting for I/O. Instead, put bp on the deferred
183 * queue and let dsl_pool_sync() finish the job.
184 */
185 bplist_append(&ds->ds_pending_deadlist, bp);
186 } else {
187 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
188 }
189 ASSERT3U(ds->ds_prev->ds_object, ==,
190 ds->ds_phys->ds_prev_snap_obj);
191 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
192 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
193 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
194 ds->ds_object && bp->blk_birth >
195 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
196 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
197 mutex_enter(&ds->ds_prev->ds_lock);
198 ds->ds_prev->ds_phys->ds_unique_bytes += used;
199 mutex_exit(&ds->ds_prev->ds_lock);
200 }
201 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
202 dsl_dir_transfer_space(ds->ds_dir, used,
203 DD_USED_HEAD, DD_USED_SNAP, tx);
204 }
205 }
206 mutex_enter(&ds->ds_lock);
207 ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
208 ds->ds_phys->ds_referenced_bytes -= used;
209 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
210 ds->ds_phys->ds_compressed_bytes -= compressed;
211 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
212 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
213 mutex_exit(&ds->ds_lock);
214
215 return (used);
216 }
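/*
 * Summary of the paths through dsl_dataset_block_kill() above
 * (illustrative, not exhaustive):
 *
 *	block born after the most recent snapshot -> free it now and
 *	    credit ds_unique_bytes;
 *	block born before it, sync context        -> dsl_deadlist_insert();
 *	block born before it, zio interrupt       -> queue on
 *	    ds_pending_deadlist; dsl_pool_sync() finishes the job
 *	    later, where blocking on I/O is safe.
 */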
217
218 uint64_t
219 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
220 {
221 uint64_t trysnap = 0;
222
223 if (ds == NULL)
224 return (0);
225 /*
226 * The snapshot creation could fail, but that would cause an
227 * incorrect FALSE return, which would only result in an
228 * overestimation of the amount of space that an operation would
229 * consume, which is OK.
230 *
231 * There's also a small window where we could miss a pending
232 * snapshot, because we could set the sync task in the quiescing
233 * phase. So this should only be used as a guess.
234 */
235 if (ds->ds_trysnap_txg >
236 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
237 trysnap = ds->ds_trysnap_txg;
238 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
239 }
240
241 boolean_t
242 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
243 uint64_t blk_birth)
244 {
245 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
246 return (B_FALSE);
247
248 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
249
250 return (B_TRUE);
251 }
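/*
 * Example (hypothetical txgs): if the most recent snapshot was taken
 * in txg 150, a block born in txg 100 is still referenced by that
 * snapshot, so it is not freeable -- killing it above only moves it
 * to a deadlist.  A block born in txg 200 is unique to the head and
 * may be freed immediately.
 */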
252
253 /* ARGSUSED */
254 static void
255 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
256 {
257 dsl_dataset_t *ds = dsv;
258
259 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
260
261 unique_remove(ds->ds_fsid_guid);
262
263 if (ds->ds_objset != NULL)
264 dmu_objset_evict(ds->ds_objset);
265
266 if (ds->ds_prev) {
267 dsl_dataset_drop_ref(ds->ds_prev, ds);
268 ds->ds_prev = NULL;
269 }
270
271 bplist_destroy(&ds->ds_pending_deadlist);
272 if (db != NULL) {
273 dsl_deadlist_close(&ds->ds_deadlist);
274 } else {
275 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
276 ASSERT(!ds->ds_deadlist.dl_oldfmt);
277 }
278 if (ds->ds_dir)
279 dsl_dir_close(ds->ds_dir, ds);
280
281 ASSERT(!list_link_active(&ds->ds_synced_link));
282
283 mutex_destroy(&ds->ds_lock);
284 mutex_destroy(&ds->ds_recvlock);
285 mutex_destroy(&ds->ds_opening_lock);
286 rw_destroy(&ds->ds_rwlock);
287 cv_destroy(&ds->ds_exclusive_cv);
288
289 kmem_free(ds, sizeof (dsl_dataset_t));
290 }
291
292 static int
293 dsl_dataset_get_snapname(dsl_dataset_t *ds)
294 {
295 dsl_dataset_phys_t *headphys;
296 int err;
297 dmu_buf_t *headdbuf;
298 dsl_pool_t *dp = ds->ds_dir->dd_pool;
299 objset_t *mos = dp->dp_meta_objset;
300
301 if (ds->ds_snapname[0])
302 return (0);
303 if (ds->ds_phys->ds_next_snap_obj == 0)
304 return (0);
305
306 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
307 FTAG, &headdbuf);
308 if (err)
309 return (err);
310 headphys = headdbuf->db_data;
311 err = zap_value_search(dp->dp_meta_objset,
312 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
313 dmu_buf_rele(headdbuf, FTAG);
314 return (err);
315 }
316
317 int
318 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
319 {
320 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
321 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
322 matchtype_t mt;
323 int err;
324
325 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
326 mt = MT_FIRST;
327 else
328 mt = MT_EXACT;
329
330 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
331 value, mt, NULL, 0, NULL);
332 if (err == ENOTSUP && mt == MT_FIRST)
333 err = zap_lookup(mos, snapobj, name, 8, 1, value);
334 return (err);
335 }
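/*
 * Usage sketch (hypothetical caller; the head dataset must already
 * be held, as in dsl_dataset_hold() below):
 *
 *	uint64_t snapobj;
 *	int err = dsl_dataset_snap_lookup(headds, "mysnap", &snapobj);
 *	if (err == 0)
 *		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snapds);
 *
 * On a case-insensitive dataset (DS_FLAG_CI_DATASET) the MT_FIRST
 * normalized lookup also matches, e.g., "MySnap".
 */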
336
337 static int
338 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
339 {
340 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
341 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
342 matchtype_t mt;
343 int err;
344
345 dsl_dir_snap_cmtime_update(ds->ds_dir);
346
347 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
348 mt = MT_FIRST;
349 else
350 mt = MT_EXACT;
351
352 err = zap_remove_norm(mos, snapobj, name, mt, tx);
353 if (err == ENOTSUP && mt == MT_FIRST)
354 err = zap_remove(mos, snapobj, name, tx);
355 return (err);
356 }
357
358 static int
359 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
360 dsl_dataset_t **dsp)
361 {
362 objset_t *mos = dp->dp_meta_objset;
363 dmu_buf_t *dbuf;
364 dsl_dataset_t *ds;
365 int err;
366 dmu_object_info_t doi;
367
368 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
369 dsl_pool_sync_context(dp));
370
371 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
372 if (err)
373 return (err);
374
375 /* Make sure dsobj has the correct object type. */
376 dmu_object_info_from_db(dbuf, &doi);
377 if (doi.doi_type != DMU_OT_DSL_DATASET) {
378 dmu_buf_rele(dbuf, tag); return (EINVAL); }
379
380 ds = dmu_buf_get_user(dbuf);
381 if (ds == NULL) {
382 dsl_dataset_t *winner = NULL;
383
384 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_PUSHPAGE);
385 ds->ds_dbuf = dbuf;
386 ds->ds_object = dsobj;
387 ds->ds_phys = dbuf->db_data;
388 list_link_init(&ds->ds_synced_link);
389
390 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
391 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
392 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
393 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
394
395 rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
396 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
397
398 bplist_create(&ds->ds_pending_deadlist);
399 dsl_deadlist_open(&ds->ds_deadlist,
400 mos, ds->ds_phys->ds_deadlist_obj);
401
402 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
403 offsetof(dmu_sendarg_t, dsa_link));
404
405 if (err == 0) {
406 err = dsl_dir_open_obj(dp,
407 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
408 }
409 if (err) {
410 mutex_destroy(&ds->ds_lock);
411 mutex_destroy(&ds->ds_recvlock);
412 mutex_destroy(&ds->ds_opening_lock);
413 rw_destroy(&ds->ds_rwlock);
414 cv_destroy(&ds->ds_exclusive_cv);
415 bplist_destroy(&ds->ds_pending_deadlist);
416 dsl_deadlist_close(&ds->ds_deadlist);
417 kmem_free(ds, sizeof (dsl_dataset_t));
418 dmu_buf_rele(dbuf, tag);
419 return (err);
420 }
421
422 if (!dsl_dataset_is_snapshot(ds)) {
423 ds->ds_snapname[0] = '\0';
424 if (ds->ds_phys->ds_prev_snap_obj) {
425 err = dsl_dataset_get_ref(dp,
426 ds->ds_phys->ds_prev_snap_obj,
427 ds, &ds->ds_prev);
428 }
429 } else {
430 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
431 err = dsl_dataset_get_snapname(ds);
432 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
433 err = zap_count(
434 ds->ds_dir->dd_pool->dp_meta_objset,
435 ds->ds_phys->ds_userrefs_obj,
436 &ds->ds_userrefs);
437 }
438 }
439
440 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
441 /*
442 * In sync context, we're called with either no lock
443 * or with the write lock. If we're not syncing,
444 * we're always called with the read lock held.
445 */
446 boolean_t need_lock =
447 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
448 dsl_pool_sync_context(dp);
449
450 if (need_lock)
451 rw_enter(&dp->dp_config_rwlock, RW_READER);
452
453 err = dsl_prop_get_ds(ds,
454 "refreservation", sizeof (uint64_t), 1,
455 &ds->ds_reserved, NULL);
456 if (err == 0) {
457 err = dsl_prop_get_ds(ds,
458 "refquota", sizeof (uint64_t), 1,
459 &ds->ds_quota, NULL);
460 }
461
462 if (need_lock)
463 rw_exit(&dp->dp_config_rwlock);
464 } else {
465 ds->ds_reserved = ds->ds_quota = 0;
466 }
467
468 if (err == 0) {
469 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
470 dsl_dataset_evict);
471 }
472 if (err || winner) {
473 bplist_destroy(&ds->ds_pending_deadlist);
474 dsl_deadlist_close(&ds->ds_deadlist);
475 if (ds->ds_prev)
476 dsl_dataset_drop_ref(ds->ds_prev, ds);
477 dsl_dir_close(ds->ds_dir, ds);
478 mutex_destroy(&ds->ds_lock);
479 mutex_destroy(&ds->ds_recvlock);
480 mutex_destroy(&ds->ds_opening_lock);
481 rw_destroy(&ds->ds_rwlock);
482 cv_destroy(&ds->ds_exclusive_cv);
483 kmem_free(ds, sizeof (dsl_dataset_t));
484 if (err) {
485 dmu_buf_rele(dbuf, tag);
486 return (err);
487 }
488 ds = winner;
489 } else {
490 ds->ds_fsid_guid =
491 unique_insert(ds->ds_phys->ds_fsid_guid);
492 }
493 }
494 ASSERT3P(ds->ds_dbuf, ==, dbuf);
495 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
496 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
497 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
498 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
499 mutex_enter(&ds->ds_lock);
500 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
501 mutex_exit(&ds->ds_lock);
502 dmu_buf_rele(ds->ds_dbuf, tag);
503 return (ENOENT);
504 }
505 mutex_exit(&ds->ds_lock);
506 *dsp = ds;
507 return (0);
508 }
509
510 static int
511 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
512 {
513 dsl_pool_t *dp = ds->ds_dir->dd_pool;
514
515 /*
516 * In syncing context we don't take the rwlock: there
517 * may be an existing writer waiting for sync phase to
518 * finish. We don't need to worry about such writers, since
519 * sync phase is single-threaded, so the writer can't be
520 * doing anything while we are active.
521 */
522 if (dsl_pool_sync_context(dp)) {
523 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
524 return (0);
525 }
526
527 /*
528 * Normal users will hold the ds_rwlock as a READER until they
529 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
530 * drop their READER lock after they set the ds_owner field.
531 *
532 * If the dataset is being destroyed, the destroy thread will
533 * obtain a WRITER lock for exclusive access after it's done its
534 * open-context work and then change the ds_owner to
535 * dsl_reaper once destruction is assured. So threads
536 * may block here temporarily, until the "destructibility" of
537 * the dataset is determined.
538 */
539 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
540 mutex_enter(&ds->ds_lock);
541 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
542 rw_exit(&dp->dp_config_rwlock);
543 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
544 if (DSL_DATASET_IS_DESTROYED(ds)) {
545 mutex_exit(&ds->ds_lock);
546 dsl_dataset_drop_ref(ds, tag);
547 rw_enter(&dp->dp_config_rwlock, RW_READER);
548 return (ENOENT);
549 }
550 /*
551 * The dp_config_rwlock lives above the ds_lock. And
552 * we need to check DSL_DATASET_IS_DESTROYED() while
553 * holding the ds_lock, so we have to drop and reacquire
554 * the ds_lock here.
555 */
556 mutex_exit(&ds->ds_lock);
557 rw_enter(&dp->dp_config_rwlock, RW_READER);
558 mutex_enter(&ds->ds_lock);
559 }
560 mutex_exit(&ds->ds_lock);
561 return (0);
562 }
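/*
 * Lifecycle sketch of the protocol described above (hypothetical
 * caller):
 *
 *	dsl_dataset_hold(name, FTAG, &ds);	ds_rwlock held as READER
 *	... read-only use of ds ...
 *	dsl_dataset_rele(ds, FTAG);		READER lock dropped
 *
 * An owner instead uses dsl_dataset_own()/dsl_dataset_disown();
 * dsl_dataset_tryown() drops the READER lock once ds_owner is set,
 * and a destroyer escalates to WRITER via
 * dsl_dataset_make_exclusive().
 */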
563
564 int
565 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
566 dsl_dataset_t **dsp)
567 {
568 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
569
570 if (err)
571 return (err);
572 return (dsl_dataset_hold_ref(*dsp, tag));
573 }
574
575 int
576 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
577 void *tag, dsl_dataset_t **dsp)
578 {
579 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
580 if (err)
581 return (err);
582 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
583 dsl_dataset_rele(*dsp, tag);
584 *dsp = NULL;
585 return (EBUSY);
586 }
587 return (0);
588 }
589
590 int
591 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
592 {
593 dsl_dir_t *dd;
594 dsl_pool_t *dp;
595 const char *snapname;
596 uint64_t obj;
597 int err = 0;
598
599 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
600 if (err)
601 return (err);
602
603 dp = dd->dd_pool;
604 obj = dd->dd_phys->dd_head_dataset_obj;
605 rw_enter(&dp->dp_config_rwlock, RW_READER);
606 if (obj)
607 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
608 else
609 err = ENOENT;
610 if (err)
611 goto out;
612
613 err = dsl_dataset_hold_ref(*dsp, tag);
614
615 /* we may be looking for a snapshot */
616 if (err == 0 && snapname != NULL) {
617 dsl_dataset_t *ds = NULL;
618
619 if (*snapname++ != '@') {
620 dsl_dataset_rele(*dsp, tag);
621 err = ENOENT;
622 goto out;
623 }
624
625 dprintf("looking for snapshot '%s'\n", snapname);
626 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
627 if (err == 0)
628 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
629 dsl_dataset_rele(*dsp, tag);
630
631 ASSERT3U((err == 0), ==, (ds != NULL));
632
633 if (ds) {
634 mutex_enter(&ds->ds_lock);
635 if (ds->ds_snapname[0] == 0)
636 (void) strlcpy(ds->ds_snapname, snapname,
637 sizeof (ds->ds_snapname));
638 mutex_exit(&ds->ds_lock);
639 err = dsl_dataset_hold_ref(ds, tag);
640 *dsp = err ? NULL : ds;
641 }
642 }
643 out:
644 rw_exit(&dp->dp_config_rwlock);
645 dsl_dir_close(dd, FTAG);
646 return (err);
647 }
648
649 int
650 dsl_dataset_own(const char *name, boolean_t inconsistentok,
651 void *tag, dsl_dataset_t **dsp)
652 {
653 int err = dsl_dataset_hold(name, tag, dsp);
654 if (err)
655 return (err);
656 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
657 dsl_dataset_rele(*dsp, tag);
658 return (EBUSY);
659 }
660 return (0);
661 }
662
663 void
664 dsl_dataset_name(dsl_dataset_t *ds, char *name)
665 {
666 if (ds == NULL) {
667 (void) strcpy(name, "mos");
668 } else {
669 dsl_dir_name(ds->ds_dir, name);
670 VERIFY(0 == dsl_dataset_get_snapname(ds));
671 if (ds->ds_snapname[0]) {
672 (void) strcat(name, "@");
673 /*
674 * We use a "recursive" mutex so that we
675 * can call dprintf_ds() with ds_lock held.
676 */
677 if (!MUTEX_HELD(&ds->ds_lock)) {
678 mutex_enter(&ds->ds_lock);
679 (void) strcat(name, ds->ds_snapname);
680 mutex_exit(&ds->ds_lock);
681 } else {
682 (void) strcat(name, ds->ds_snapname);
683 }
684 }
685 }
686 }
687
688 static int
689 dsl_dataset_namelen(dsl_dataset_t *ds)
690 {
691 int result;
692
693 if (ds == NULL) {
694 result = 3; /* "mos" */
695 } else {
696 result = dsl_dir_namelen(ds->ds_dir);
697 VERIFY(0 == dsl_dataset_get_snapname(ds));
698 if (ds->ds_snapname[0]) {
699 ++result; /* adding one for the @-sign */
700 if (!MUTEX_HELD(&ds->ds_lock)) {
701 mutex_enter(&ds->ds_lock);
702 result += strlen(ds->ds_snapname);
703 mutex_exit(&ds->ds_lock);
704 } else {
705 result += strlen(ds->ds_snapname);
706 }
707 }
708 }
709
710 return (result);
711 }
712
713 void
714 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
715 {
716 dmu_buf_rele(ds->ds_dbuf, tag);
717 }
718
719 void
720 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
721 {
722 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
723 rw_exit(&ds->ds_rwlock);
724 }
725 dsl_dataset_drop_ref(ds, tag);
726 }
727
728 void
729 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
730 {
731 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
732 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
733
734 mutex_enter(&ds->ds_lock);
735 ds->ds_owner = NULL;
736 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
737 rw_exit(&ds->ds_rwlock);
738 cv_broadcast(&ds->ds_exclusive_cv);
739 }
740 mutex_exit(&ds->ds_lock);
741 if (ds->ds_dbuf)
742 dsl_dataset_drop_ref(ds, tag);
743 else
744 dsl_dataset_evict(NULL, ds);
745 }
746
747 boolean_t
748 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
749 {
750 boolean_t gotit = FALSE;
751
752 mutex_enter(&ds->ds_lock);
753 if (ds->ds_owner == NULL &&
754 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
755 ds->ds_owner = tag;
756 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
757 rw_exit(&ds->ds_rwlock);
758 gotit = TRUE;
759 }
760 mutex_exit(&ds->ds_lock);
761 return (gotit);
762 }
763
764 void
765 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
766 {
767 ASSERT3P(owner, ==, ds->ds_owner);
768 if (!RW_WRITE_HELD(&ds->ds_rwlock))
769 rw_enter(&ds->ds_rwlock, RW_WRITER);
770 }
771
772 uint64_t
773 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
774 uint64_t flags, dmu_tx_t *tx)
775 {
776 dsl_pool_t *dp = dd->dd_pool;
777 dmu_buf_t *dbuf;
778 dsl_dataset_phys_t *dsphys;
779 uint64_t dsobj;
780 objset_t *mos = dp->dp_meta_objset;
781
782 if (origin == NULL)
783 origin = dp->dp_origin_snap;
784
785 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
786 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
787 ASSERT(dmu_tx_is_syncing(tx));
788 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
789
790 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
791 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
792 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
793 dmu_buf_will_dirty(dbuf, tx);
794 dsphys = dbuf->db_data;
795 bzero(dsphys, sizeof (dsl_dataset_phys_t));
796 dsphys->ds_dir_obj = dd->dd_object;
797 dsphys->ds_flags = flags;
798 dsphys->ds_fsid_guid = unique_create();
799 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
800 sizeof (dsphys->ds_guid));
801 dsphys->ds_snapnames_zapobj =
802 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
803 DMU_OT_NONE, 0, tx);
804 dsphys->ds_creation_time = gethrestime_sec();
805 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
806
807 if (origin == NULL) {
808 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
809 } else {
810 dsl_dataset_t *ohds;
811
812 dsphys->ds_prev_snap_obj = origin->ds_object;
813 dsphys->ds_prev_snap_txg =
814 origin->ds_phys->ds_creation_txg;
815 dsphys->ds_referenced_bytes =
816 origin->ds_phys->ds_referenced_bytes;
817 dsphys->ds_compressed_bytes =
818 origin->ds_phys->ds_compressed_bytes;
819 dsphys->ds_uncompressed_bytes =
820 origin->ds_phys->ds_uncompressed_bytes;
821 dsphys->ds_bp = origin->ds_phys->ds_bp;
822 dsphys->ds_flags |= origin->ds_phys->ds_flags;
823
824 dmu_buf_will_dirty(origin->ds_dbuf, tx);
825 origin->ds_phys->ds_num_children++;
826
827 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
828 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
829 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
830 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
831 dsl_dataset_rele(ohds, FTAG);
832
833 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
834 if (origin->ds_phys->ds_next_clones_obj == 0) {
835 origin->ds_phys->ds_next_clones_obj =
836 zap_create(mos,
837 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
838 }
839 VERIFY(0 == zap_add_int(mos,
840 origin->ds_phys->ds_next_clones_obj,
841 dsobj, tx));
842 }
843
844 dmu_buf_will_dirty(dd->dd_dbuf, tx);
845 dd->dd_phys->dd_origin_obj = origin->ds_object;
846 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
847 if (origin->ds_dir->dd_phys->dd_clones == 0) {
848 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
849 origin->ds_dir->dd_phys->dd_clones =
850 zap_create(mos,
851 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
852 }
853 VERIFY3U(0, ==, zap_add_int(mos,
854 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
855 }
856 }
857
858 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
859 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
860
861 dmu_buf_rele(dbuf, FTAG);
862
863 dmu_buf_will_dirty(dd->dd_dbuf, tx);
864 dd->dd_phys->dd_head_dataset_obj = dsobj;
865
866 return (dsobj);
867 }
868
869 uint64_t
870 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
871 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
872 {
873 dsl_pool_t *dp = pdd->dd_pool;
874 uint64_t dsobj, ddobj;
875 dsl_dir_t *dd;
876
877 ASSERT(lastname[0] != '@');
878
879 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
880 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
881
882 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
883
884 dsl_deleg_set_create_perms(dd, tx, cr);
885
886 dsl_dir_close(dd, FTAG);
887
888 /*
889 * If we are creating a clone, make sure we zero out any stale
890 * data from the origin snapshot's zil header.
891 */
892 if (origin != NULL) {
893 dsl_dataset_t *ds;
894 objset_t *os;
895
896 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
897 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
898 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
899 dsl_dataset_dirty(ds, tx);
900 dsl_dataset_rele(ds, FTAG);
901 }
902
903 return (dsobj);
904 }
905
906 /*
907 * The snapshots must all be in the same pool.
908 */
909 int
910 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
911 {
912 int err;
913 dsl_sync_task_t *dst;
914 spa_t *spa;
915 nvpair_t *pair;
916 dsl_sync_task_group_t *dstg;
917
918 pair = nvlist_next_nvpair(snaps, NULL);
919 if (pair == NULL)
920 return (0);
921
922 err = spa_open(nvpair_name(pair), &spa, FTAG);
923 if (err)
924 return (err);
925 dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
926
927 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
928 pair = nvlist_next_nvpair(snaps, pair)) {
929 dsl_dataset_t *ds;
930
931 err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
932 if (err == 0) {
933 struct dsl_ds_destroyarg *dsda;
934
935 dsl_dataset_make_exclusive(ds, dstg);
936 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
937 KM_SLEEP);
938 dsda->ds = ds;
939 dsda->defer = defer;
940 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
941 dsl_dataset_destroy_sync, dsda, dstg, 0);
942 } else if (err == ENOENT) {
943 err = 0;
944 } else {
945 (void) strcpy(failed, nvpair_name(pair));
946 break;
947 }
948 }
949
950 if (err == 0)
951 err = dsl_sync_task_group_wait(dstg);
952
953 for (dst = list_head(&dstg->dstg_tasks); dst;
954 dst = list_next(&dstg->dstg_tasks, dst)) {
955 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
956 dsl_dataset_t *ds = dsda->ds;
957
958 /*
959 * Return the file system name that triggered the error
960 */
961 if (dst->dst_err) {
962 dsl_dataset_name(ds, failed);
963 }
964 ASSERT3P(dsda->rm_origin, ==, NULL);
965 dsl_dataset_disown(ds, dstg);
966 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
967 }
968
969 dsl_sync_task_group_destroy(dstg);
970 spa_close(spa, FTAG);
971 return (err);
972
973 }
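/*
 * Caller sketch (hypothetical names): the nvlist keys are full
 * snapshot names, all from one pool; the pair values are ignored.
 *
 *	nvlist_t *snaps;
 *	char failed[MAXNAMELEN];
 *
 *	VERIFY(nvlist_alloc(&snaps, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_boolean(snaps, "tank/fs@snap1") == 0);
 *	VERIFY(nvlist_add_boolean(snaps, "tank/fs@snap2") == 0);
 *	err = dmu_snapshots_destroy_nvl(snaps, B_FALSE, failed);
 *	nvlist_free(snaps);
 *
 * On failure, "failed" is filled in with the name of the snapshot
 * that triggered the error.
 */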
974
975 static boolean_t
976 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
977 {
978 boolean_t might_destroy = B_FALSE;
979
980 mutex_enter(&ds->ds_lock);
981 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
982 DS_IS_DEFER_DESTROY(ds))
983 might_destroy = B_TRUE;
984 mutex_exit(&ds->ds_lock);
985
986 return (might_destroy);
987 }
988
989 /*
990 * If we're removing a clone, and these three conditions are true:
991 * 1) the clone's origin has no other children
992 * 2) the clone's origin has no user references
993 * 3) the clone's origin has been marked for deferred destruction
994 * Then, prepare to remove the origin as part of this sync task group.
995 */
996 static int
997 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
998 {
999 dsl_dataset_t *ds = dsda->ds;
1000 dsl_dataset_t *origin = ds->ds_prev;
1001
1002 if (dsl_dataset_might_destroy_origin(origin)) {
1003 char *name;
1004 int namelen;
1005 int error;
1006
1007 namelen = dsl_dataset_namelen(origin) + 1;
1008 name = kmem_alloc(namelen, KM_SLEEP);
1009 dsl_dataset_name(origin, name);
1010 #ifdef _KERNEL
1011 error = zfs_unmount_snap(name, NULL);
1012 if (error) {
1013 kmem_free(name, namelen);
1014 return (error);
1015 }
1016 #endif
1017 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1018 kmem_free(name, namelen);
1019 if (error)
1020 return (error);
1021 dsda->rm_origin = origin;
1022 dsl_dataset_make_exclusive(origin, tag);
1023 }
1024
1025 return (0);
1026 }
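/*
 * Example of the case handled above: "zfs destroy -d tank/fs@origin"
 * defers destruction of a snapshot that still has a clone; a later
 * "zfs destroy tank/clone" then finds all three conditions true and
 * removes the origin snapshot in the same sync task group as the
 * clone itself.
 */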
1027
1028 /*
1029 * ds must be opened as OWNER. On return (whether successful or not),
1030 * ds will be closed and caller can no longer dereference it.
1031 */
1032 int
1033 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1034 {
1035 int err;
1036 dsl_sync_task_group_t *dstg;
1037 objset_t *os;
1038 dsl_dir_t *dd;
1039 uint64_t obj;
1040 struct dsl_ds_destroyarg dsda = { 0 };
1041 dsl_dataset_t *dummy_ds;
1042
1043 dsda.ds = ds;
1044
1045 if (dsl_dataset_is_snapshot(ds)) {
1046 /* Destroying a snapshot is simpler */
1047 dsl_dataset_make_exclusive(ds, tag);
1048
1049 dsda.defer = defer;
1050 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1051 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1052 &dsda, tag, 0);
1053 ASSERT3P(dsda.rm_origin, ==, NULL);
1054 goto out;
1055 } else if (defer) {
1056 err = EINVAL;
1057 goto out;
1058 }
1059
1060 dd = ds->ds_dir;
1061 dummy_ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
1062 dummy_ds->ds_dir = dd;
1063 dummy_ds->ds_object = ds->ds_object;
1064
1065 if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
1066 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
1067 /*
1068 * Check for errors and mark this ds as inconsistent, in
1069 * case we crash while freeing the objects.
1070 */
1071 err = dsl_sync_task_do(dd->dd_pool,
1072 dsl_dataset_destroy_begin_check,
1073 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1074 if (err)
1075 goto out_free;
1076
1077 err = dmu_objset_from_ds(ds, &os);
1078 if (err)
1079 goto out_free;
1080
1081 /*
1082 * Remove all objects while in the open context so that
1083 * there is less work to do in the syncing context.
1084 */
1085 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1086 ds->ds_phys->ds_prev_snap_txg)) {
1087 /*
1088 * Ignore errors; if there is not enough disk space,
1089 * we will deal with it in dsl_dataset_destroy_sync().
1090 */
1091 (void) dmu_free_object(os, obj);
1092 }
1093 if (err != ESRCH)
1094 goto out_free;
1095
1096 /*
1097 * Sync out all in-flight IO.
1098 */
1099 txg_wait_synced(dd->dd_pool, 0);
1100
1101 /*
1102 * If we managed to free all the objects in open
1103 * context, the user space accounting should be zero.
1104 */
1105 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1106 dmu_objset_userused_enabled(os)) {
1107 ASSERTV(uint64_t count);
1108
1109 ASSERT(zap_count(os, DMU_USERUSED_OBJECT,
1110 &count) != 0 || count == 0);
1111 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT,
1112 &count) != 0 || count == 0);
1113 }
1114 }
1115
1116 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1117 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1118 rw_exit(&dd->dd_pool->dp_config_rwlock);
1119
1120 if (err)
1121 goto out_free;
1122
1123 /*
1124 * Blow away the dsl_dir + head dataset.
1125 */
1126 dsl_dataset_make_exclusive(ds, tag);
1127 /*
1128 * If we're removing a clone, we might also need to remove its
1129 * origin.
1130 */
1131 do {
1132 dsda.need_prep = B_FALSE;
1133 if (dsl_dir_is_clone(dd)) {
1134 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1135 if (err) {
1136 dsl_dir_close(dd, FTAG);
1137 goto out_free;
1138 }
1139 }
1140
1141 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1142 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1143 dsl_dataset_destroy_sync, &dsda, tag, 0);
1144 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1145 dsl_dir_destroy_sync, dummy_ds, FTAG, 0);
1146 err = dsl_sync_task_group_wait(dstg);
1147 dsl_sync_task_group_destroy(dstg);
1148
1149 /*
1150 * We could be racing against 'zfs release' or 'zfs destroy -d'
1151 * on the origin snap, in which case we can get EBUSY if we
1152 * needed to destroy the origin snap but were not ready to
1153 * do so.
1154 */
1155 if (dsda.need_prep) {
1156 ASSERT(err == EBUSY);
1157 ASSERT(dsl_dir_is_clone(dd));
1158 ASSERT(dsda.rm_origin == NULL);
1159 }
1160 } while (dsda.need_prep);
1161
1162 if (dsda.rm_origin != NULL)
1163 dsl_dataset_disown(dsda.rm_origin, tag);
1164
1165 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1166 if (err)
1167 dsl_dir_close(dd, FTAG);
1168
1169 out_free:
1170 kmem_free(dummy_ds, sizeof (dsl_dataset_t));
1171 out:
1172 dsl_dataset_disown(ds, tag);
1173 return (err);
1174 }
1175
1176 blkptr_t *
1177 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1178 {
1179 return (&ds->ds_phys->ds_bp);
1180 }
1181
1182 void
1183 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1184 {
1185 ASSERT(dmu_tx_is_syncing(tx));
1186 /* If it's the meta-objset, set dp_meta_rootbp */
1187 if (ds == NULL) {
1188 tx->tx_pool->dp_meta_rootbp = *bp;
1189 } else {
1190 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1191 ds->ds_phys->ds_bp = *bp;
1192 }
1193 }
1194
1195 spa_t *
1196 dsl_dataset_get_spa(dsl_dataset_t *ds)
1197 {
1198 return (ds->ds_dir->dd_pool->dp_spa);
1199 }
1200
1201 void
1202 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1203 {
1204 dsl_pool_t *dp;
1205
1206 if (ds == NULL) /* this is the meta-objset */
1207 return;
1208
1209 ASSERT(ds->ds_objset != NULL);
1210
1211 if (ds->ds_phys->ds_next_snap_obj != 0)
1212 panic("dirtying snapshot!");
1213
1214 dp = ds->ds_dir->dd_pool;
1215
1216 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1217 /* up the hold count until we can be written out */
1218 dmu_buf_add_ref(ds->ds_dbuf, ds);
1219 }
1220 }
1221
1222 boolean_t
1223 dsl_dataset_is_dirty(dsl_dataset_t *ds)
1224 {
1225 int t;
1226
1227 for (t = 0; t < TXG_SIZE; t++) {
1228 if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1229 ds, t))
1230 return (B_TRUE);
1231 }
1232 return (B_FALSE);
1233 }
1234
1235 /*
1236 * The unique space in the head dataset can be calculated by subtracting
1237 * the space used in the most recent snapshot that is still being used
1238 * in this file system from the space currently in use. To figure out
1239 * the space in the most recent snapshot still in use, we need to take
1240 * the total space used in the snapshot and subtract out the space that
1241 * has been freed up since the snapshot was taken.
1242 */
1243 static void
1244 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1245 {
1246 uint64_t mrs_used;
1247 uint64_t dlused, dlcomp, dluncomp;
1248
1249 ASSERT(!dsl_dataset_is_snapshot(ds));
1250
1251 if (ds->ds_phys->ds_prev_snap_obj != 0)
1252 mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1253 else
1254 mrs_used = 0;
1255
1256 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1257
1258 ASSERT3U(dlused, <=, mrs_used);
1259 ds->ds_phys->ds_unique_bytes =
1260 ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1261
1262 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1263 SPA_VERSION_UNIQUE_ACCURATE)
1264 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1265 }
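/*
 * Worked example (hypothetical sizes): if the head references 50G,
 * the most recent snapshot referenced 40G when it was taken, and 10G
 * of that has since been freed (i.e., sits on the head's deadlist),
 * then 40G - 10G = 30G is still shared with the snapshot, and the
 * head's unique space is 50G - 30G = 20G.
 */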
1266
1267 struct killarg {
1268 dsl_dataset_t *ds;
1269 dmu_tx_t *tx;
1270 };
1271
1272 /* ARGSUSED */
1273 static int
1274 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1275 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1276 {
1277 struct killarg *ka = arg;
1278 dmu_tx_t *tx = ka->tx;
1279
1280 if (bp == NULL)
1281 return (0);
1282
1283 if (zb->zb_level == ZB_ZIL_LEVEL) {
1284 ASSERT(zilog != NULL);
1285 /*
1286 * It's a block in the intent log. It has no
1287 * accounting, so just free it.
1288 */
1289 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1290 } else {
1291 ASSERT(zilog == NULL);
1292 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1293 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1294 }
1295
1296 return (0);
1297 }
1298
1299 /* ARGSUSED */
1300 static int
1301 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1302 {
1303 dsl_dataset_t *ds = arg1;
1304 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1305 uint64_t count;
1306 int err;
1307
1308 /*
1309 * Can't delete a head dataset if there are snapshots of it.
1310 * (Except if the only snapshots are from the branch we cloned
1311 * from.)
1312 */
1313 if (ds->ds_prev != NULL &&
1314 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1315 return (EBUSY);
1316
1317 /*
1318 * This is really a dsl_dir thing, but check it here so that
1319 * we'll be less likely to leave this dataset inconsistent &
1320 * nearly destroyed.
1321 */
1322 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1323 if (err)
1324 return (err);
1325 if (count != 0)
1326 return (EEXIST);
1327
1328 return (0);
1329 }
1330
1331 /* ARGSUSED */
1332 static void
1333 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1334 {
1335 dsl_dataset_t *ds = arg1;
1336 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1337
1338 /* Mark it as inconsistent on-disk, in case we crash */
1339 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1340 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1341
1342 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1343 "dataset = %llu", ds->ds_object);
1344 }
1345
1346 static int
1347 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1348 dmu_tx_t *tx)
1349 {
1350 dsl_dataset_t *ds = dsda->ds;
1351 dsl_dataset_t *ds_prev = ds->ds_prev;
1352
1353 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1354 struct dsl_ds_destroyarg ndsda = {0};
1355
1356 /*
1357 * If we're not prepared to remove the origin, don't remove
1358 * the clone either.
1359 */
1360 if (dsda->rm_origin == NULL) {
1361 dsda->need_prep = B_TRUE;
1362 return (EBUSY);
1363 }
1364
1365 ndsda.ds = ds_prev;
1366 ndsda.is_origin_rm = B_TRUE;
1367 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1368 }
1369
1370 /*
1371 * If we're not going to remove the origin after all,
1372 * undo the open context setup.
1373 */
1374 if (dsda->rm_origin != NULL) {
1375 dsl_dataset_disown(dsda->rm_origin, tag);
1376 dsda->rm_origin = NULL;
1377 }
1378
1379 return (0);
1380 }
1381
1382 /*
1383 * If you add new checks here, you may need to add
1384 * additional checks to the "temporary" case in
1385 * snapshot_check() in dmu_objset.c.
1386 */
1387 /* ARGSUSED */
1388 int
1389 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1390 {
1391 struct dsl_ds_destroyarg *dsda = arg1;
1392 dsl_dataset_t *ds = dsda->ds;
1393
1394 /* we have an owner hold, so no one else can destroy us */
1395 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1396
1397 /*
1398 * Only allow deferred destroy on pools that support it.
1399 * NOTE: deferred destroy is only supported on snapshots.
1400 */
1401 if (dsda->defer) {
1402 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1403 SPA_VERSION_USERREFS)
1404 return (ENOTSUP);
1405 ASSERT(dsl_dataset_is_snapshot(ds));
1406 return (0);
1407 }
1408
1409 /*
1410 * Can't delete a head dataset if there are snapshots of it.
1411 * (Except if the only snapshots are from the branch we cloned
1412 * from.)
1413 */
1414 if (ds->ds_prev != NULL &&
1415 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1416 return (EBUSY);
1417
1418 /*
1419 * If we made changes this txg, traverse_dsl_dataset won't find
1420 * them. Try again.
1421 */
1422 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1423 return (EAGAIN);
1424
1425 if (dsl_dataset_is_snapshot(ds)) {
1426 /*
1427 * If this snapshot has an elevated user reference count,
1428 * we can't destroy it yet.
1429 */
1430 if (ds->ds_userrefs > 0 && !dsda->releasing)
1431 return (EBUSY);
1432
1433 mutex_enter(&ds->ds_lock);
1434 /*
1435 * Can't delete a branch point. However, if we're destroying
1436 * a clone and removing its origin due to it having a user
1437 * hold count of 0 and having been marked for deferred destroy,
1438 * it's OK for the origin to have a single clone.
1439 */
1440 if (ds->ds_phys->ds_num_children >
1441 (dsda->is_origin_rm ? 2 : 1)) {
1442 mutex_exit(&ds->ds_lock);
1443 return (EEXIST);
1444 }
1445 mutex_exit(&ds->ds_lock);
1446 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1447 return (dsl_dataset_origin_check(dsda, arg2, tx));
1448 }
1449
1450 /* XXX we should do some i/o error checking... */
1451 return (0);
1452 }
1453
1454 struct refsarg {
1455 kmutex_t lock;
1456 boolean_t gone;
1457 kcondvar_t cv;
1458 };
1459
1460 /* ARGSUSED */
1461 static void
1462 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1463 {
1464 struct refsarg *arg = argv;
1465
1466 mutex_enter(&arg->lock);
1467 arg->gone = TRUE;
1468 cv_signal(&arg->cv);
1469 mutex_exit(&arg->lock);
1470 }
1471
1472 static void
1473 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1474 {
1475 struct refsarg arg;
1476
1477 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1478 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1479 arg.gone = FALSE;
1480 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1481 dsl_dataset_refs_gone);
1482 dmu_buf_rele(ds->ds_dbuf, tag);
1483 mutex_enter(&arg.lock);
1484 while (!arg.gone)
1485 cv_wait(&arg.cv, &arg.lock);
1486 ASSERT(arg.gone);
1487 mutex_exit(&arg.lock);
1488 ds->ds_dbuf = NULL;
1489 ds->ds_phys = NULL;
1490 mutex_destroy(&arg.lock);
1491 cv_destroy(&arg.cv);
1492 }
1493
1494 static void
1495 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1496 {
1497 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1498 int err;
1499 ASSERTV(uint64_t count);
1500
1501 ASSERT(ds->ds_phys->ds_num_children >= 2);
1502 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1503 /*
1504 * The err should not be ENOENT, but a bug in a previous version
1505 * of the code could cause upgrade_clones_cb() to not set
1506 * ds_next_snap_obj when it should, leading to a missing entry.
1507 * If we knew that the pool was created after
1508 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1509 * ENOENT. However, at least we can check that we don't have
1510 * too many entries in the next_clones_obj even after failing to
1511 * remove this one.
1512 */
1513 if (err != ENOENT) {
1514 VERIFY3U(err, ==, 0);
1515 }
1516 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1517 &count));
1518 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1519 }
1520
1521 static void
1522 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1523 {
1524 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1525 zap_cursor_t zc;
1526 zap_attribute_t za;
1527
1528 /*
1529 * If it is the old version, dd_clones doesn't exist so we can't
1530 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
1531 * doesn't matter.
1532 */
1533 if (ds->ds_dir->dd_phys->dd_clones == 0)
1534 return;
1535
1536 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1537 zap_cursor_retrieve(&zc, &za) == 0;
1538 zap_cursor_advance(&zc)) {
1539 dsl_dataset_t *clone;
1540
1541 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1542 za.za_first_integer, FTAG, &clone));
1543 if (clone->ds_dir->dd_origin_txg > mintxg) {
1544 dsl_deadlist_remove_key(&clone->ds_deadlist,
1545 mintxg, tx);
1546 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1547 }
1548 dsl_dataset_rele(clone, FTAG);
1549 }
1550 zap_cursor_fini(&zc);
1551 }
1552
1553 struct process_old_arg {
1554 dsl_dataset_t *ds;
1555 dsl_dataset_t *ds_prev;
1556 boolean_t after_branch_point;
1557 zio_t *pio;
1558 uint64_t used, comp, uncomp;
1559 };
1560
1561 static int
1562 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1563 {
1564 struct process_old_arg *poa = arg;
1565 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1566
1567 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1568 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1569 if (poa->ds_prev && !poa->after_branch_point &&
1570 bp->blk_birth >
1571 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1572 poa->ds_prev->ds_phys->ds_unique_bytes +=
1573 bp_get_dsize_sync(dp->dp_spa, bp);
1574 }
1575 } else {
1576 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1577 poa->comp += BP_GET_PSIZE(bp);
1578 poa->uncomp += BP_GET_UCSIZE(bp);
1579 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1580 }
1581 return (0);
1582 }
1583
1584 static void
1585 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1586 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1587 {
1588 struct process_old_arg poa = { 0 };
1589 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1590 objset_t *mos = dp->dp_meta_objset;
1591
1592 ASSERT(ds->ds_deadlist.dl_oldfmt);
1593 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1594
1595 poa.ds = ds;
1596 poa.ds_prev = ds_prev;
1597 poa.after_branch_point = after_branch_point;
1598 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1599 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1600 process_old_cb, &poa, tx));
1601 VERIFY3U(zio_wait(poa.pio), ==, 0);
1602 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1603
1604 /* change snapused */
1605 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1606 -poa.used, -poa.comp, -poa.uncomp, tx);
1607
1608 /* swap next's deadlist to our deadlist */
1609 dsl_deadlist_close(&ds->ds_deadlist);
1610 dsl_deadlist_close(&ds_next->ds_deadlist);
1611 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1612 ds->ds_phys->ds_deadlist_obj);
1613 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1614 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1615 ds_next->ds_phys->ds_deadlist_obj);
1616 }
1617
1618 static int
1619 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
1620 {
1621 int err;
1622 struct killarg ka;
1623
1624 /*
1625 * Free everything that we point to (that's born after
1626 * the previous snapshot, if we are a clone)
1627 *
1628 * NB: this should be very quick, because we already
1629 * freed all the objects in open context.
1630 */
1631 ka.ds = ds;
1632 ka.tx = tx;
1633 err = traverse_dataset(ds,
1634 ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
1635 kill_blkptr, &ka);
1636 ASSERT3U(err, ==, 0);
1637 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
1638
1639 return (err);
1640 }
1641
1642 void
1643 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1644 {
1645 struct dsl_ds_destroyarg *dsda = arg1;
1646 dsl_dataset_t *ds = dsda->ds;
1647 int err = 0;
1648 int after_branch_point = FALSE;
1649 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1650 objset_t *mos = dp->dp_meta_objset;
1651 dsl_dataset_t *ds_prev = NULL;
1652 boolean_t wont_destroy;
1653 uint64_t obj;
1654
1655 wont_destroy = (dsda->defer &&
1656 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1657
1658 ASSERT(ds->ds_owner || wont_destroy);
1659 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1660 ASSERT(ds->ds_prev == NULL ||
1661 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1662 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1663
1664 if (wont_destroy) {
1665 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1666 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1667 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1668 return;
1669 }
1670
1671 /* signal any waiters that this dataset is going away */
1672 mutex_enter(&ds->ds_lock);
1673 ds->ds_owner = dsl_reaper;
1674 cv_broadcast(&ds->ds_exclusive_cv);
1675 mutex_exit(&ds->ds_lock);
1676
1677 /* Remove our reservation */
1678 if (ds->ds_reserved != 0) {
1679 dsl_prop_setarg_t psa;
1680 uint64_t value = 0;
1681
1682 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1683 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1684 &value);
1685 psa.psa_effective_value = 0; /* predict default value */
1686
1687 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1688 ASSERT3U(ds->ds_reserved, ==, 0);
1689 }
1690
1691 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1692
1693 dsl_scan_ds_destroyed(ds, tx);
1694
1695 obj = ds->ds_object;
1696
1697 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1698 if (ds->ds_prev) {
1699 ds_prev = ds->ds_prev;
1700 } else {
1701 VERIFY(0 == dsl_dataset_hold_obj(dp,
1702 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1703 }
1704 after_branch_point =
1705 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1706
1707 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1708 if (after_branch_point &&
1709 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1710 remove_from_next_clones(ds_prev, obj, tx);
1711 if (ds->ds_phys->ds_next_snap_obj != 0) {
1712 VERIFY(0 == zap_add_int(mos,
1713 ds_prev->ds_phys->ds_next_clones_obj,
1714 ds->ds_phys->ds_next_snap_obj, tx));
1715 }
1716 }
1717 if (after_branch_point &&
1718 ds->ds_phys->ds_next_snap_obj == 0) {
1719 /* This clone is toast. */
1720 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1721 ds_prev->ds_phys->ds_num_children--;
1722
1723 /*
1724 * If the clone's origin has no other clones, no
1725 * user holds, and has been marked for deferred
1726 * deletion, then we should have done the necessary
1727 * destroy setup for it.
1728 */
1729 if (ds_prev->ds_phys->ds_num_children == 1 &&
1730 ds_prev->ds_userrefs == 0 &&
1731 DS_IS_DEFER_DESTROY(ds_prev)) {
1732 ASSERT3P(dsda->rm_origin, !=, NULL);
1733 } else {
1734 ASSERT3P(dsda->rm_origin, ==, NULL);
1735 }
1736 } else if (!after_branch_point) {
1737 ds_prev->ds_phys->ds_next_snap_obj =
1738 ds->ds_phys->ds_next_snap_obj;
1739 }
1740 }
1741
1742 if (dsl_dataset_is_snapshot(ds)) {
1743 dsl_dataset_t *ds_next;
1744 uint64_t old_unique;
1745 uint64_t used = 0, comp = 0, uncomp = 0;
1746
1747 VERIFY(0 == dsl_dataset_hold_obj(dp,
1748 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1749 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1750
1751 old_unique = ds_next->ds_phys->ds_unique_bytes;
1752
1753 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1754 ds_next->ds_phys->ds_prev_snap_obj =
1755 ds->ds_phys->ds_prev_snap_obj;
1756 ds_next->ds_phys->ds_prev_snap_txg =
1757 ds->ds_phys->ds_prev_snap_txg;
1758 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1759 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1760
1761
1762 if (ds_next->ds_deadlist.dl_oldfmt) {
1763 process_old_deadlist(ds, ds_prev, ds_next,
1764 after_branch_point, tx);
1765 } else {
1766 /* Adjust prev's unique space. */
1767 if (ds_prev && !after_branch_point) {
1768 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1769 ds_prev->ds_phys->ds_prev_snap_txg,
1770 ds->ds_phys->ds_prev_snap_txg,
1771 &used, &comp, &uncomp);
1772 ds_prev->ds_phys->ds_unique_bytes += used;
1773 }
1774
1775 /* Adjust snapused. */
1776 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1777 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1778 &used, &comp, &uncomp);
1779 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1780 -used, -comp, -uncomp, tx);
1781
1782 /* Move blocks to be freed to pool's free list. */
1783 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1784 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1785 tx);
1786 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1787 DD_USED_HEAD, used, comp, uncomp, tx);
1788
1789 /* Merge our deadlist into next's and free it. */
1790 dsl_deadlist_merge(&ds_next->ds_deadlist,
1791 ds->ds_phys->ds_deadlist_obj, tx);
1792 }
1793 dsl_deadlist_close(&ds->ds_deadlist);
1794 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1795
1796 /* Collapse range in clone heads */
1797 dsl_dataset_remove_clones_key(ds,
1798 ds->ds_phys->ds_creation_txg, tx);
1799
1800 if (dsl_dataset_is_snapshot(ds_next)) {
1801 dsl_dataset_t *ds_nextnext;
1802 dsl_dataset_t *hds;
1803
1804 /*
1805 * Update next's unique to include blocks which
1806 * were previously shared by only this snapshot
1807 * and it. Those blocks will be born after the
1808 * prev snap and before this snap, and will have
1809 * died after the next snap and before the one
1810 * after that (i.e. be on the snap after next's
1811 * deadlist).
1812 */
1813 VERIFY(0 == dsl_dataset_hold_obj(dp,
1814 ds_next->ds_phys->ds_next_snap_obj,
1815 FTAG, &ds_nextnext));
1816 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1817 ds->ds_phys->ds_prev_snap_txg,
1818 ds->ds_phys->ds_creation_txg,
1819 &used, &comp, &uncomp);
1820 ds_next->ds_phys->ds_unique_bytes += used;
1821 dsl_dataset_rele(ds_nextnext, FTAG);
1822 ASSERT3P(ds_next->ds_prev, ==, NULL);
1823
1824 /* Collapse range in this head. */
1825 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1826 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1827 FTAG, &hds));
1828 dsl_deadlist_remove_key(&hds->ds_deadlist,
1829 ds->ds_phys->ds_creation_txg, tx);
1830 dsl_dataset_rele(hds, FTAG);
1831
1832 } else {
1833 ASSERT3P(ds_next->ds_prev, ==, ds);
1834 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1835 ds_next->ds_prev = NULL;
1836 if (ds_prev) {
1837 VERIFY(0 == dsl_dataset_get_ref(dp,
1838 ds->ds_phys->ds_prev_snap_obj,
1839 ds_next, &ds_next->ds_prev));
1840 }
1841
1842 dsl_dataset_recalc_head_uniq(ds_next);
1843
1844 /*
1845 * Reduce the amount of our unconsumed refreservation
1846 * being charged to our parent by the amount of
1847 * new unique data we have gained.
1848 */
1849 if (old_unique < ds_next->ds_reserved) {
1850 int64_t mrsdelta;
1851 uint64_t new_unique =
1852 ds_next->ds_phys->ds_unique_bytes;
1853
1854 ASSERT(old_unique <= new_unique);
1855 mrsdelta = MIN(new_unique - old_unique,
1856 ds_next->ds_reserved - old_unique);
1857 dsl_dir_diduse_space(ds->ds_dir,
1858 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1859 }
1860 }
1861 dsl_dataset_rele(ds_next, FTAG);
1862 } else {
1863 zfeature_info_t *async_destroy =
1864 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
1865 objset_t *os;
1866
1867 /*
1868 * There's no next snapshot, so this is a head dataset.
1869 * Destroy the deadlist. Unless it's a clone, the
1870 * deadlist should be empty. (If it's a clone, it's
1871 * safe to ignore the deadlist contents.)
1872 */
1873 dsl_deadlist_close(&ds->ds_deadlist);
1874 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1875 ds->ds_phys->ds_deadlist_obj = 0;
1876
1877 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
1878
1879 if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
1880 err = old_synchronous_dataset_destroy(ds, tx);
1881 } else {
1882 /*
1883 * Move the bptree into the pool's list of trees to
1884 * clean up and update space accounting information.
1885 */
1886 uint64_t used, comp, uncomp;
1887
1888 zil_destroy_sync(dmu_objset_zil(os), tx);
1889
1890 if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
1891 spa_feature_incr(dp->dp_spa, async_destroy, tx);
1892 dp->dp_bptree_obj = bptree_alloc(mos, tx);
1893 VERIFY(zap_add(mos,
1894 DMU_POOL_DIRECTORY_OBJECT,
1895 DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
1896 &dp->dp_bptree_obj, tx) == 0);
1897 }
1898
1899 used = ds->ds_dir->dd_phys->dd_used_bytes;
1900 comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
1901 uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;
1902
1903 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1904 ds->ds_phys->ds_unique_bytes == used);
1905
1906 bptree_add(mos, dp->dp_bptree_obj,
1907 &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
1908 used, comp, uncomp, tx);
1909 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
1910 -used, -comp, -uncomp, tx);
1911 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
1912 used, comp, uncomp, tx);
1913 }
1914
1915 if (ds->ds_prev != NULL) {
1916 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1917 VERIFY3U(0, ==, zap_remove_int(mos,
1918 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1919 ds->ds_object, tx));
1920 }
1921 dsl_dataset_rele(ds->ds_prev, ds);
1922 ds->ds_prev = ds_prev = NULL;
1923 }
1924 }
1925
1926 /*
1927 * This must be done after the dsl_traverse(), because it will
1928 * re-open the objset.
1929 */
1930 if (ds->ds_objset) {
1931 dmu_objset_evict(ds->ds_objset);
1932 ds->ds_objset = NULL;
1933 }
1934
1935 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1936 /* Erase the link in the dir */
1937 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1938 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1939 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1940 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1941 ASSERT(err == 0);
1942 } else {
1943 /* remove from snapshot namespace */
1944 dsl_dataset_t *ds_head;
1945 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1946 VERIFY(0 == dsl_dataset_hold_obj(dp,
1947 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1948 VERIFY(0 == dsl_dataset_get_snapname(ds));
1949 #ifdef ZFS_DEBUG
1950 {
1951 uint64_t val;
1952
1953 err = dsl_dataset_snap_lookup(ds_head,
1954 ds->ds_snapname, &val);
1955 ASSERT3U(err, ==, 0);
1956 ASSERT3U(val, ==, obj);
1957 }
1958 #endif
1959 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1960 ASSERT(err == 0);
1961 dsl_dataset_rele(ds_head, FTAG);
1962 }
1963
1964 if (ds_prev && ds->ds_prev != ds_prev)
1965 dsl_dataset_rele(ds_prev, FTAG);
1966
1967 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1968 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1969 "dataset = %llu", ds->ds_object);
1970
1971 if (ds->ds_phys->ds_next_clones_obj != 0) {
1972 ASSERTV(uint64_t count);
1973 ASSERT(0 == zap_count(mos,
1974 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1975 VERIFY(0 == dmu_object_free(mos,
1976 ds->ds_phys->ds_next_clones_obj, tx));
1977 }
1978 if (ds->ds_phys->ds_props_obj != 0)
1979 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1980 if (ds->ds_phys->ds_userrefs_obj != 0)
1981 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1982 dsl_dir_close(ds->ds_dir, ds);
1983 ds->ds_dir = NULL;
1984 dsl_dataset_drain_refs(ds, tag);
1985 VERIFY(0 == dmu_object_free(mos, obj, tx));
1986
1987 if (dsda->rm_origin) {
1988 /*
1989 * Remove the origin of the clone we just destroyed.
1990 */
1991 struct dsl_ds_destroyarg ndsda = {0};
1992
1993 ndsda.ds = dsda->rm_origin;
1994 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1995 }
1996 }
1997
1998 static int
1999 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
2000 {
2001 uint64_t asize;
2002
2003 if (!dmu_tx_is_syncing(tx))
2004 return (0);
2005
2006 /*
2007 * If there's an fs-only reservation, any blocks that might become
2008 * owned by the snapshot dataset must be accommodated by space
2009 * outside of the reservation.
2010 */
2011 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2012 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
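/*
 * Worked example (hypothetical figures): with refreservation=10G and
 * 4G of unique data, asize = MIN(4G, 10G) = 4G; the snapshot would
 * take ownership of those 4G, which must fit outside the reservation.
 */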
2013 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2014 return (ENOSPC);
2015
2016 /*
2017 * Propagate any reserved space for this snapshot to other
2018 * snapshot checks in this sync group.
2019 */
2020 if (asize > 0)
2021 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2022
2023 return (0);
2024 }
2025
2026 int
2027 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
2028 {
2029 dsl_dataset_t *ds = arg1;
2030 const char *snapname = arg2;
2031 int err;
2032 uint64_t value;
2033
2034 /*
2035 * We don't allow multiple snapshots of the same txg. If there
2036 * is already one, try again.
2037 */
2038 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2039 return (EAGAIN);
2040
2041 /*
2042 * Check for a conflicting snapshot name.
2043 */
2044 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2045 if (err == 0)
2046 return (EEXIST);
2047 if (err != ENOENT)
2048 return (err);
2049
2050 /*
2051 * Check that the snapshot's full name is not too long: it is the
2052 * dataset name's length + 1 for the @-sign + the snapshot name's length.
2053 */
2054 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2055 return (ENAMETOOLONG);
2056
2057 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2058 if (err)
2059 return (err);
2060
2061 ds->ds_trysnap_txg = tx->tx_txg;
2062 return (0);
2063 }
2064
2065 void
2066 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2067 {
2068 dsl_dataset_t *ds = arg1;
2069 const char *snapname = arg2;
2070 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2071 dmu_buf_t *dbuf;
2072 dsl_dataset_phys_t *dsphys;
2073 uint64_t dsobj, crtxg;
2074 objset_t *mos = dp->dp_meta_objset;
2075 int err;
2076
2077 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2078
2079 /*
2080 * The origin's ds_creation_txg has to be < TXG_INITIAL
2081 */
2082 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2083 crtxg = 1;
2084 else
2085 crtxg = tx->tx_txg;
2086
2087 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2088 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2089 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2090 dmu_buf_will_dirty(dbuf, tx);
2091 dsphys = dbuf->db_data;
2092 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2093 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2094 dsphys->ds_fsid_guid = unique_create();
2095 (void) random_get_pseudo_bytes((void *)&dsphys->ds_guid,
2096 sizeof (dsphys->ds_guid));
2097 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2098 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2099 dsphys->ds_next_snap_obj = ds->ds_object;
2100 dsphys->ds_num_children = 1;
2101 dsphys->ds_creation_time = gethrestime_sec();
2102 dsphys->ds_creation_txg = crtxg;
2103 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2104 dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
2105 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2106 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2107 dsphys->ds_flags = ds->ds_phys->ds_flags;
2108 dsphys->ds_bp = ds->ds_phys->ds_bp;
2109 dmu_buf_rele(dbuf, FTAG);
2110
2111 ASSERT3U(ds->ds_prev != NULL, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2112 if (ds->ds_prev) {
2113 uint64_t next_clones_obj =
2114 ds->ds_prev->ds_phys->ds_next_clones_obj;
2115 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2116 ds->ds_object ||
2117 ds->ds_prev->ds_phys->ds_num_children > 1);
2118 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2119 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2120 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2121 ds->ds_prev->ds_phys->ds_creation_txg);
2122 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2123 } else if (next_clones_obj != 0) {
2124 remove_from_next_clones(ds->ds_prev,
2125 dsphys->ds_next_snap_obj, tx);
2126 VERIFY3U(0, ==, zap_add_int(mos,
2127 next_clones_obj, dsobj, tx));
2128 }
2129 }
2130
2131 /*
2132 * If we have a reference-reservation on this dataset, we will
2133 * need to increase the amount of refreservation being charged
2134 * since our unique space is going to zero.
2135 */
2136 if (ds->ds_reserved) {
2137 int64_t delta;
2138 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2139 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2140 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2141 delta, 0, 0, tx);
2142 }
2143
2144 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2145 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2146 ds->ds_dir->dd_myname, snapname, dsobj,
2147 ds->ds_phys->ds_prev_snap_txg);
2148 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2149 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2150 dsl_deadlist_close(&ds->ds_deadlist);
2151 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2152 dsl_deadlist_add_key(&ds->ds_deadlist,
2153 ds->ds_phys->ds_prev_snap_txg, tx);
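/*
 * The snapshot inherits the head's old deadlist object (assigned to
 * dsphys->ds_deadlist_obj above); the head carries on with a fresh
 * clone of it, keyed at the previous snapshot's txg.
 */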
2154
2155 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2156 ds->ds_phys->ds_prev_snap_obj = dsobj;
2157 ds->ds_phys->ds_prev_snap_txg = crtxg;
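/* every block the head references is now shared with the new snapshot */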
2158 ds->ds_phys->ds_unique_bytes = 0;
2159 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2160 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2161
2162 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2163 snapname, 8, 1, &dsobj, tx);
2164 ASSERT(err == 0);
2165
2166 if (ds->ds_prev)
2167 dsl_dataset_drop_ref(ds->ds_prev, ds);
2168 VERIFY(0 == dsl_dataset_get_ref(dp,
2169 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2170
2171 dsl_scan_ds_snapshotted(ds, tx);
2172
2173 dsl_dir_snap_cmtime_update(ds->ds_dir);
2174
2175 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2176 "dataset = %llu", dsobj);
2177 }
2178
2179 void
2180 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2181 {
2182 ASSERT(dmu_tx_is_syncing(tx));
2183 ASSERT(ds->ds_objset != NULL);
2184 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2185
2186 /*
2187 * in case we had to change ds_fsid_guid when we opened it,
2188 * sync it out now.
2189 */
2190 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2191 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2192
2193 dmu_objset_sync(ds->ds_objset, zio, tx);
2194 }
2195
2196 static void
2197 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2198 {
2199 uint64_t count = 0;
2200 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2201 zap_cursor_t zc;
2202 zap_attribute_t za;
2203 nvlist_t *propval;
2204 nvlist_t *val;
2205
2206 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2207 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2208 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2209
2210 /*
2211 * There may be missing entries in ds_next_clones_obj
2212 * due to a bug in a previous version of the code.
2213 * Only trust it if it has the right number of entries.
2214 */
2215 if (ds->ds_phys->ds_next_clones_obj != 0) {
2216 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2217 &count));
2218 }
2219 if (count != ds->ds_phys->ds_num_children - 1) {
2220 goto fail;
2221 }
2222 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2223 zap_cursor_retrieve(&zc, &za) == 0;
2224 zap_cursor_advance(&zc)) {
2225 dsl_dataset_t *clone;
2226 char buf[ZFS_MAXNAMELEN];
2227 /*
2228 * Even though we hold the dp_config_rwlock, the dataset
2229 * may fail to open, returning ENOENT. If there is a
2230 * thread concurrently attempting to destroy this
2231 * dataset, it will have the ds_rwlock held for
2232 * RW_WRITER. Our call to dsl_dataset_hold_obj() ->
2233 * dsl_dataset_hold_ref() will fail its
2234 * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
2235 * dp_config_rwlock, and wait for the destroy thread to make
2236 * progress and signal ds_exclusive_cv. If the destroy was
2237 * successful, we will see that
2238 * DSL_DATASET_IS_DESTROYED(), and return ENOENT.
2239 */
2240 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2241 za.za_first_integer, FTAG, &clone) != 0)
2242 continue;
2243 dsl_dir_name(clone->ds_dir, buf);
2244 VERIFY(nvlist_add_boolean(val, buf) == 0);
2245 dsl_dataset_rele(clone, FTAG);
2246 }
2247 zap_cursor_fini(&zc);
2248 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2249 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2250 propval) == 0);
2251 fail:
2252 nvlist_free(val);
2253 nvlist_free(propval);
2254 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2255 }
2256
2257 void
2258 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2259 {
2260 uint64_t refd, avail, uobjs, aobjs, ratio;
2261
2262 dsl_dir_stats(ds->ds_dir, nv);
2263
2264 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2265 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2266 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2267
2268 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2269 ds->ds_phys->ds_creation_time);
2270 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2271 ds->ds_phys->ds_creation_txg);
2272 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2273 ds->ds_quota);
2274 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2275 ds->ds_reserved);
2276 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2277 ds->ds_phys->ds_guid);
2278 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2279 ds->ds_phys->ds_unique_bytes);
2280 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2281 ds->ds_object);
2282 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2283 ds->ds_userrefs);
2284 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2285 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2286
2287 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2288 uint64_t written, comp, uncomp;
2289 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2290 dsl_dataset_t *prev;
2291 int err;
2292
2293 rw_enter(&dp->dp_config_rwlock, RW_READER);
2294 err = dsl_dataset_hold_obj(dp,
2295 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2296 rw_exit(&dp->dp_config_rwlock);
2297 if (err == 0) {
2298 err = dsl_dataset_space_written(prev, ds, &written,
2299 &comp, &uncomp);
2300 dsl_dataset_rele(prev, FTAG);
2301 if (err == 0) {
2302 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2303 written);
2304 }
2305 }
2306 }
2307
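/*
 * The ratio is expressed as a percentage: e.g. 2G of uncompressed
 * data stored in 1G on disk yields a refratio of 200 (i.e. 2.00x).
 */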
2308 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2309 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2310 ds->ds_phys->ds_compressed_bytes);
2311 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2312
2313 if (ds->ds_phys->ds_next_snap_obj) {
2314 /*
2315 * This is a snapshot; override the dd's space used with
2316 * our unique space and compression ratio.
2317 */
2318 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2319 ds->ds_phys->ds_unique_bytes);
2320 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2321
2322 get_clones_stat(ds, nv);
2323 }
2324 }
2325
2326 void
2327 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2328 {
2329 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2330 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2331 stat->dds_guid = ds->ds_phys->ds_guid;
2332 if (ds->ds_phys->ds_next_snap_obj) {
2333 stat->dds_is_snapshot = B_TRUE;
2334 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2335 } else {
2336 stat->dds_is_snapshot = B_FALSE;
2337 stat->dds_num_clones = 0;
2338 }
2339
2340 /* clone origin is really a dsl_dir thing... */
2341 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2342 if (dsl_dir_is_clone(ds->ds_dir)) {
2343 dsl_dataset_t *ods;
2344
2345 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2346 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2347 dsl_dataset_name(ods, stat->dds_origin);
2348 dsl_dataset_drop_ref(ods, FTAG);
2349 } else {
2350 stat->dds_origin[0] = '\0';
2351 }
2352 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2353 }
2354
2355 uint64_t
2356 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2357 {
2358 return (ds->ds_fsid_guid);
2359 }
2360
2361 void
2362 dsl_dataset_space(dsl_dataset_t *ds,
2363 uint64_t *refdbytesp, uint64_t *availbytesp,
2364 uint64_t *usedobjsp, uint64_t *availobjsp)
2365 {
2366 *refdbytesp = ds->ds_phys->ds_referenced_bytes;
2367 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2368 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2369 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
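/*
 * The unconsumed portion of a refreservation was set aside for this
 * dataset, so report it as available here even though the dsl_dir
 * has already charged for it.
 */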
2370 if (ds->ds_quota != 0) {
2371 /*
2372 * Adjust available bytes according to refquota
2373 */
2374 if (*refdbytesp < ds->ds_quota)
2375 *availbytesp = MIN(*availbytesp,
2376 ds->ds_quota - *refdbytesp);
2377 else
2378 *availbytesp = 0;
2379 }
2380 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2381 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2382 }
2383
2384 boolean_t
2385 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2386 {
2387 ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
2388
2389 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2390 dsl_pool_sync_context(dp));
2391 if (ds->ds_prev == NULL)
2392 return (B_FALSE);
2393 if (ds->ds_phys->ds_bp.blk_birth >
2394 ds->ds_prev->ds_phys->ds_creation_txg) {
2395 objset_t *os, *os_prev;
2396 /*
2397 * It may be that only the ZIL differs, because it was
2398 * reset in the head. Don't count that as being
2399 * modified.
2400 */
2401 if (dmu_objset_from_ds(ds, &os) != 0)
2402 return (B_TRUE);
2403 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2404 return (B_TRUE);
2405 return (bcmp(&os->os_phys->os_meta_dnode,
2406 &os_prev->os_phys->os_meta_dnode,
2407 sizeof (os->os_phys->os_meta_dnode)) != 0);
2408 }
2409 return (B_FALSE);
2410 }
2411
2412 /* ARGSUSED */
2413 static int
2414 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2415 {
2416 dsl_dataset_t *ds = arg1;
2417 char *newsnapname = arg2;
2418 dsl_dir_t *dd = ds->ds_dir;
2419 dsl_dataset_t *hds;
2420 uint64_t val;
2421 int err;
2422
2423 err = dsl_dataset_hold_obj(dd->dd_pool,
2424 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2425 if (err)
2426 return (err);
2427
2428 /* new name better not be in use */
2429 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2430 dsl_dataset_rele(hds, FTAG);
2431
2432 if (err == 0)
2433 err = EEXIST;
2434 else if (err == ENOENT)
2435 err = 0;
2436
2437 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2438 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2439 err = ENAMETOOLONG;
2440
2441 return (err);
2442 }
2443
2444 static void
2445 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2446 {
2447 dsl_dataset_t *ds = arg1;
2448 const char *newsnapname = arg2;
2449 dsl_dir_t *dd = ds->ds_dir;
2450 objset_t *mos = dd->dd_pool->dp_meta_objset;
2451 dsl_dataset_t *hds;
2452 int err;
2453
2454 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2455
2456 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2457 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2458
2459 VERIFY(0 == dsl_dataset_get_snapname(ds));
2460 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2461 ASSERT3U(err, ==, 0);
2462 mutex_enter(&ds->ds_lock);
2463 (void) strcpy(ds->ds_snapname, newsnapname);
2464 mutex_exit(&ds->ds_lock);
2465 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2466 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2467 ASSERT3U(err, ==, 0);
2468
2469 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2470 "dataset = %llu", ds->ds_object);
2471 dsl_dataset_rele(hds, FTAG);
2472 }
2473
2474 struct renamesnaparg {
2475 dsl_sync_task_group_t *dstg;
2476 char failed[MAXPATHLEN];
2477 char *oldsnap;
2478 char *newsnap;
2479 };
2480
2481 static int
2482 dsl_snapshot_rename_one(const char *name, void *arg)
2483 {
2484 struct renamesnaparg *ra = arg;
2485 dsl_dataset_t *ds = NULL;
2486 char *snapname;
2487 int err;
2488
2489 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2490 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2491
2492 /*
2493 * For recursive snapshot renames the parent won't be changing
2494 * so we just pass name for both the to/from arguments.
2495 */
2496 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2497 if (err != 0) {
2498 strfree(snapname);
2499 return (err == ENOENT ? 0 : err);
2500 }
2501
2502 #ifdef _KERNEL
2503 /*
2504 * Each filesystem undergoing rename needs its snapshot unmounted first.
2505 */
2506 (void) zfs_unmount_snap(snapname, NULL);
2507 #endif
2508 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2509 strfree(snapname);
2510 if (err != 0)
2511 return (err == ENOENT ? 0 : err);
2512
2513 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2514 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2515
2516 return (0);
2517 }
2518
2519 static int
2520 dsl_recursive_rename(char *oldname, const char *newname)
2521 {
2522 int err;
2523 struct renamesnaparg *ra;
2524 dsl_sync_task_t *dst;
2525 spa_t *spa;
2526 char *cp, *fsname = spa_strdup(oldname);
2527 int len = strlen(oldname) + 1;
2528
2529 /* truncate the snapshot name to get the fsname */
2530 cp = strchr(fsname, '@');
2531 *cp = '\0';
2532
2533 err = spa_open(fsname, &spa, FTAG);
2534 if (err) {
2535 kmem_free(fsname, len);
2536 return (err);
2537 }
2538 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2539 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2540
2541 ra->oldsnap = strchr(oldname, '@') + 1;
2542 ra->newsnap = strchr(newname, '@') + 1;
2543 *ra->failed = '\0';
2544
2545 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2546 DS_FIND_CHILDREN);
2547 kmem_free(fsname, len);
2548
2549 if (err == 0) {
2550 err = dsl_sync_task_group_wait(ra->dstg);
2551 }
2552
2553 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2554 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2555 dsl_dataset_t *ds = dst->dst_arg1;
2556 if (dst->dst_err) {
2557 dsl_dir_name(ds->ds_dir, ra->failed);
2558 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2559 (void) strlcat(ra->failed, ra->newsnap,
2560 sizeof (ra->failed));
2561 }
2562 dsl_dataset_rele(ds, ra->dstg);
2563 }
2564
2565 if (err)
2566 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2567
2568 dsl_sync_task_group_destroy(ra->dstg);
2569 kmem_free(ra, sizeof (struct renamesnaparg));
2570 spa_close(spa, FTAG);
2571 return (err);
2572 }
2573
2574 static int
2575 dsl_valid_rename(const char *oldname, void *arg)
2576 {
2577 int delta = *(int *)arg;
2578
2579 if (strlen(oldname) + delta >= MAXNAMELEN)
2580 return (ENAMETOOLONG);
2581
2582 return (0);
2583 }
2584
2585 #pragma weak dmu_objset_rename = dsl_dataset_rename
2586 int
2587 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2588 {
2589 dsl_dir_t *dd;
2590 dsl_dataset_t *ds;
2591 const char *tail;
2592 int err;
2593
2594 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2595 if (err)
2596 return (err);
2597
2598 if (tail == NULL) {
2599 int delta = strlen(newname) - strlen(oldname);
2600
2601 /* if we're growing, validate child name lengths */
2602 if (delta > 0)
2603 err = dmu_objset_find(oldname, dsl_valid_rename,
2604 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2605
2606 if (err == 0)
2607 err = dsl_dir_rename(dd, newname);
2608 dsl_dir_close(dd, FTAG);
2609 return (err);
2610 }
2611
2612 if (tail[0] != '@') {
2613 /* the name ended in a nonexistent component */
2614 dsl_dir_close(dd, FTAG);
2615 return (ENOENT);
2616 }
2617
2618 dsl_dir_close(dd, FTAG);
2619
2620 /* new name must be snapshot in same filesystem */
2621 tail = strchr(newname, '@');
2622 if (tail == NULL)
2623 return (EINVAL);
2624 tail++;
2625 if (strncmp(oldname, newname, tail - newname) != 0)
2626 return (EXDEV);
2627
2628 if (recursive) {
2629 err = dsl_recursive_rename(oldname, newname);
2630 } else {
2631 err = dsl_dataset_hold(oldname, FTAG, &ds);
2632 if (err)
2633 return (err);
2634
2635 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2636 dsl_dataset_snapshot_rename_check,
2637 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2638
2639 dsl_dataset_rele(ds, FTAG);
2640 }
2641
2642 return (err);
2643 }
2644
2645 struct promotenode {
2646 list_node_t link;
2647 dsl_dataset_t *ds;
2648 };
2649
2650 struct promotearg {
2651 list_t shared_snaps, origin_snaps, clone_snaps;
2652 dsl_dataset_t *origin_origin;
2653 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2654 char *err_ds;
2655 };
2656
2657 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2658
2659 static int
2660 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2661 {
2662 dsl_dataset_t *hds = arg1;
2663 struct promotearg *pa = arg2;
2664 struct promotenode *snap = list_head(&pa->shared_snaps);
2665 dsl_dataset_t *origin_ds = snap->ds;
2666 int err;
2667 uint64_t unused;
2668
2669 /* Check that it is a real clone */
2670 if (!dsl_dir_is_clone(hds->ds_dir))
2671 return (EINVAL);
2672
2673 /* Since this is so expensive, don't do the preliminary check */
2674 if (!dmu_tx_is_syncing(tx))
2675 return (0);
2676
2677 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2678 return (EXDEV);
2679
2680 /* compute origin's new unique space */
2681 snap = list_tail(&pa->clone_snaps);
2682 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2683 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2684 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2685 &pa->unique, &unused, &unused);
2686
2687 /*
2688 * Walk the snapshots that we are moving
2689 *
2690 * Compute space to transfer. Consider the incremental changes
2691 * to used for each snapshot:
2692 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2693 * So each snapshot gave birth to:
2694 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2695 * So a sequence would look like:
2696 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2697 * Which simplifies to:
2698 * uN + kN + kN-1 + ... + k1 + k0
2699 * Note however, if we stop before we reach the ORIGIN we get:
2700 * uN + kN + kN-1 + ... + kM - uM-1
2701 */
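/*
 * Worked example (hypothetical figures): moving two snapshots with
 * u1 = 10M, k1 = 2M and k0 = 3M transfers u1 + k1 + k0 = 15M. The
 * loop below accumulates the k terms (each snapshot's deadlist space)
 * on top of the origin's referenced bytes (the u term).
 */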
2702 pa->used = origin_ds->ds_phys->ds_referenced_bytes;
2703 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2704 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2705 for (snap = list_head(&pa->shared_snaps); snap;
2706 snap = list_next(&pa->shared_snaps, snap)) {
2707 uint64_t val, dlused, dlcomp, dluncomp;
2708 dsl_dataset_t *ds = snap->ds;
2709
2710 /* Check that the snapshot name does not conflict */
2711 VERIFY(0 == dsl_dataset_get_snapname(ds));
2712 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2713 if (err == 0) {
2714 err = EEXIST;
2715 goto out;
2716 }
2717 if (err != ENOENT)
2718 goto out;
2719
2720 /* The very first snapshot does not have a deadlist */
2721 if (ds->ds_phys->ds_prev_snap_obj == 0)
2722 continue;
2723
2724 dsl_deadlist_space(&ds->ds_deadlist,
2725 &dlused, &dlcomp, &dluncomp);
2726 pa->used += dlused;
2727 pa->comp += dlcomp;
2728 pa->uncomp += dluncomp;
2729 }
2730
2731 /*
2732 * If we are a clone of a clone then we never reached ORIGIN,
2733 * so we need to subtract out the clone origin's used space.
2734 */
2735 if (pa->origin_origin) {
2736 pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
2737 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2738 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2739 }
2740
2741 /* Check that there is enough space here */
2742 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2743 pa->used);
2744 if (err)
2745 return (err);
2746
2747 /*
2748 * Compute the amounts of space that will be used by snapshots
2749 * after the promotion (for both origin and clone). For each,
2750 * it is the amount of space that will be on all of their
2751 * deadlists (that was not born before their new origin).
2752 */
2753 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2754 uint64_t space;
2755
2756 /*
2757 * Note, typically this will not be a clone of a clone,
2758 * so dd_origin_txg will be < TXG_INITIAL, so
2759 * these snaplist_space() -> dsl_deadlist_space_range()
2760 * calls will be fast because they do not have to
2761 * iterate over all bps.
2762 */
2763 snap = list_head(&pa->origin_snaps);
2764 err = snaplist_space(&pa->shared_snaps,
2765 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2766 if (err)
2767 return (err);
2768
2769 err = snaplist_space(&pa->clone_snaps,
2770 snap->ds->ds_dir->dd_origin_txg, &space);
2771 if (err)
2772 return (err);
2773 pa->cloneusedsnap += space;
2774 }
2775 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2776 err = snaplist_space(&pa->origin_snaps,
2777 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2778 if (err)
2779 return (err);
2780 }
2781
2782 return (0);
2783 out:
2784 pa->err_ds = snap->ds->ds_snapname;
2785 return (err);
2786 }
2787
2788 static void
2789 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2790 {
2791 dsl_dataset_t *hds = arg1;
2792 struct promotearg *pa = arg2;
2793 struct promotenode *snap = list_head(&pa->shared_snaps);
2794 dsl_dataset_t *origin_ds = snap->ds;
2795 dsl_dataset_t *origin_head;
2796 dsl_dir_t *dd = hds->ds_dir;
2797 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2798 dsl_dir_t *odd = NULL;
2799 uint64_t oldnext_obj;
2800 int64_t delta;
2801
2802 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2803
2804 snap = list_head(&pa->origin_snaps);
2805 origin_head = snap->ds;
2806
2807 /*
2808 * We need to explicitly open odd, since origin_ds's dd will be
2809 * changing.
2810 */
2811 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2812 NULL, FTAG, &odd));
2813
2814 /* change origin's next snap */
2815 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2816 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2817 snap = list_tail(&pa->clone_snaps);
2818 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2819 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2820
2821 /* change the origin's next clone */
2822 if (origin_ds->ds_phys->ds_next_clones_obj) {
2823 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2824 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2825 origin_ds->ds_phys->ds_next_clones_obj,
2826 oldnext_obj, tx));
2827 }
2828
2829 /* change origin */
2830 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2831 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2832 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2833 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2834 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2835 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2836 origin_head->ds_dir->dd_origin_txg =
2837 origin_ds->ds_phys->ds_creation_txg;
2838
2839 /* change dd_clone entries */
2840 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2841 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2842 odd->dd_phys->dd_clones, hds->ds_object, tx));
2843 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2844 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2845 hds->ds_object, tx));
2846
2847 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2848 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2849 origin_head->ds_object, tx));
2850 if (dd->dd_phys->dd_clones == 0) {
2851 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2852 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2853 }
2854 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2855 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2856
2857 }
2858
2859 /* move snapshots to this dir */
2860 for (snap = list_head(&pa->shared_snaps); snap;
2861 snap = list_next(&pa->shared_snaps, snap)) {
2862 dsl_dataset_t *ds = snap->ds;
2863
2864 /* unregister props as dsl_dir is changing */
2865 if (ds->ds_objset) {
2866 dmu_objset_evict(ds->ds_objset);
2867 ds->ds_objset = NULL;
2868 }
2869 /* move snap name entry */
2870 VERIFY(0 == dsl_dataset_get_snapname(ds));
2871 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2872 ds->ds_snapname, tx));
2873 VERIFY(0 == zap_add(dp->dp_meta_objset,
2874 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2875 8, 1, &ds->ds_object, tx));
2876
2877 /* change containing dsl_dir */
2878 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2879 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2880 ds->ds_phys->ds_dir_obj = dd->dd_object;
2881 ASSERT3P(ds->ds_dir, ==, odd);
2882 dsl_dir_close(ds->ds_dir, ds);
2883 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2884 NULL, ds, &ds->ds_dir));
2885
2886 /* move any clone references */
2887 if (ds->ds_phys->ds_next_clones_obj &&
2888 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2889 zap_cursor_t zc;
2890 zap_attribute_t za;
2891
2892 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2893 ds->ds_phys->ds_next_clones_obj);
2894 zap_cursor_retrieve(&zc, &za) == 0;
2895 zap_cursor_advance(&zc)) {
2896 dsl_dataset_t *cnds;
2897 uint64_t o;
2898
2899 if (za.za_first_integer == oldnext_obj) {
2900 /*
2901 * We've already moved the
2902 * origin's reference.
2903 */
2904 continue;
2905 }
2906
2907 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2908 za.za_first_integer, FTAG, &cnds));
2909 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2910
2911 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2912 odd->dd_phys->dd_clones, o, tx), ==, 0);
2913 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2914 dd->dd_phys->dd_clones, o, tx), ==, 0);
2915 dsl_dataset_rele(cnds, FTAG);
2916 }
2917 zap_cursor_fini(&zc);
2918 }
2919
2920 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2921 }
2922
2923 /*
2924 * Change space accounting.
2925 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2926 * both be valid, or both be 0 (resulting in delta == 0). This
2927 * is true for each of {clone,origin} independently.
2928 */
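/*
 * Hypothetical figures: if the promoted clone will carry 5G of
 * snapshot space but its dir currently charges 2G under DD_USED_SNAP,
 * then delta = 3G of the incoming pa->used is booked to DD_USED_SNAP
 * and the remainder to DD_USED_HEAD; the origin side mirrors this
 * with delta <= 0.
 */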
2929
2930 delta = pa->cloneusedsnap -
2931 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2932 ASSERT3S(delta, >=, 0);
2933 ASSERT3U(pa->used, >=, delta);
2934 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2935 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2936 pa->used - delta, pa->comp, pa->uncomp, tx);
2937
2938 delta = pa->originusedsnap -
2939 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2940 ASSERT3S(delta, <=, 0);
2941 ASSERT3U(pa->used, >=, -delta);
2942 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2943 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2944 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2945
2946 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2947
2948 /* log history record */
2949 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2950 "dataset = %llu", hds->ds_object);
2951
2952 dsl_dir_close(odd, FTAG);
2953 }
2954
2955 static char *snaplist_tag = "snaplist";
2956 /*
2957 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2958 * (exclusive) and last_obj (inclusive). The list will be in reverse
2959 * order (last_obj will be the list_head()). If first_obj == 0, do all
2960 * snapshots back to this dataset's origin.
2961 */
2962 static int
2963 snaplist_make(dsl_pool_t *dp, boolean_t own,
2964 uint64_t first_obj, uint64_t last_obj, list_t *l)
2965 {
2966 uint64_t obj = last_obj;
2967
2968 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2969
2970 list_create(l, sizeof (struct promotenode),
2971 offsetof(struct promotenode, link));
2972
2973 while (obj != first_obj) {
2974 dsl_dataset_t *ds;
2975 struct promotenode *snap;
2976 int err;
2977
2978 if (own) {
2979 err = dsl_dataset_own_obj(dp, obj,
2980 0, snaplist_tag, &ds);
2981 if (err == 0)
2982 dsl_dataset_make_exclusive(ds, snaplist_tag);
2983 } else {
2984 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2985 }
2986 if (err == ENOENT) {
2987 /* lost race with snapshot destroy */
2988 struct promotenode *last = list_tail(l);
2989 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2990 obj = last->ds->ds_phys->ds_prev_snap_obj;
2991 continue;
2992 } else if (err) {
2993 return (err);
2994 }
2995
2996 if (first_obj == 0)
2997 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2998
2999 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
3000 snap->ds = ds;
3001 list_insert_tail(l, snap);
3002 obj = ds->ds_phys->ds_prev_snap_obj;
3003 }
3004
3005 return (0);
3006 }
3007
3008 static int
3009 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
3010 {
3011 struct promotenode *snap;
3012
3013 *spacep = 0;
3014 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
3015 uint64_t used, comp, uncomp;
3016 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3017 mintxg, UINT64_MAX, &used, &comp, &uncomp);
3018 *spacep += used;
3019 }
3020 return (0);
3021 }
3022
3023 static void
3024 snaplist_destroy(list_t *l, boolean_t own)
3025 {
3026 struct promotenode *snap;
3027
3028 if (!l || !list_link_active(&l->list_head))
3029 return;
3030
3031 while ((snap = list_tail(l)) != NULL) {
3032 list_remove(l, snap);
3033 if (own)
3034 dsl_dataset_disown(snap->ds, snaplist_tag);
3035 else
3036 dsl_dataset_rele(snap->ds, snaplist_tag);
3037 kmem_free(snap, sizeof (struct promotenode));
3038 }
3039 list_destroy(l);
3040 }
3041
3042 /*
3043 * Promote a clone. Nomenclature note:
3044 * "clone" or "cds": the original clone which is being promoted
3045 * "origin" or "ods": the snapshot which is originally clone's origin
3046 * "origin head" or "ohds": the dataset which is the head
3047 * (filesystem/volume) for the origin
3048 * "origin origin": the origin of the origin's filesystem (typically
3049 * NULL, indicating that the clone is not a clone of a clone).
3050 */
3051 int
3052 dsl_dataset_promote(const char *name, char *conflsnap)
3053 {
3054 dsl_dataset_t *ds;
3055 dsl_dir_t *dd;
3056 dsl_pool_t *dp;
3057 dmu_object_info_t doi;
3058 struct promotearg pa;
3059 struct promotenode *snap;
3060 int err;
3061
3062 bzero(&pa, sizeof (struct promotearg));
3063 err = dsl_dataset_hold(name, FTAG, &ds);
3064 if (err)
3065 return (err);
3066 dd = ds->ds_dir;
3067 dp = dd->dd_pool;
3068
3069 err = dmu_object_info(dp->dp_meta_objset,
3070 ds->ds_phys->ds_snapnames_zapobj, &doi);
3071 if (err) {
3072 dsl_dataset_rele(ds, FTAG);
3073 return (err);
3074 }
3075
3076 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3077 dsl_dataset_rele(ds, FTAG);
3078 return (EINVAL);
3079 }
3080
3081 /*
3082 * We are going to inherit all the snapshots taken before our
3083 * origin (i.e., our new origin will be our parent's origin).
3084 * Take ownership of them so that we can rename them into our
3085 * namespace.
3086 */
3087 rw_enter(&dp->dp_config_rwlock, RW_READER);
3088
3089 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3090 &pa.shared_snaps);
3091 if (err != 0)
3092 goto out;
3093
3094 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3095 if (err != 0)
3096 goto out;
3097
3098 snap = list_head(&pa.shared_snaps);
3099 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3100 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3101 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3102 if (err != 0)
3103 goto out;
3104
3105 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3106 err = dsl_dataset_hold_obj(dp,
3107 snap->ds->ds_dir->dd_phys->dd_origin_obj,
3108 FTAG, &pa.origin_origin);
3109 if (err != 0)
3110 goto out;
3111 }
3112
3113 out:
3114 rw_exit(&dp->dp_config_rwlock);
3115
3116 /*
3117 * Add in 128x the snapnames zapobj size, since we will be moving
3118 * a bunch of snapnames to the promoted ds, and dirtying their
3119 * bonus buffers.
3120 */
3121 if (err == 0) {
3122 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3123 dsl_dataset_promote_sync, ds, &pa,
3124 2 + 2 * doi.doi_physical_blocks_512);
3125 if (err && pa.err_ds && conflsnap)
3126 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3127 }
3128
3129 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3130 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3131 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3132 if (pa.origin_origin)
3133 dsl_dataset_rele(pa.origin_origin, FTAG);
3134 dsl_dataset_rele(ds, FTAG);
3135 return (err);
3136 }
3137
3138 struct cloneswaparg {
3139 dsl_dataset_t *cds; /* clone dataset */
3140 dsl_dataset_t *ohds; /* origin's head dataset */
3141 boolean_t force;
3142 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3143 };
3144
3145 /* ARGSUSED */
3146 static int
3147 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3148 {
3149 struct cloneswaparg *csa = arg1;
3150
3151 /* they should both be heads */
3152 if (dsl_dataset_is_snapshot(csa->cds) ||
3153 dsl_dataset_is_snapshot(csa->ohds))
3154 return (EINVAL);
3155
3156 /* the branch point should be just before them */
3157 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3158 return (EINVAL);
3159
3160 /* cds should be the clone (unless they are unrelated) */
3161 if (csa->cds->ds_prev != NULL &&
3162 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3163 csa->ohds->ds_object !=
3164 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3165 return (EINVAL);
3166
3167 /* the clone should be a child of the origin */
3168 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3169 return (EINVAL);
3170
3171 /* ohds shouldn't be modified unless 'force' */
3172 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3173 return (ETXTBSY);
3174
3175 /* adjust amount of any unconsumed refreservation */
3176 csa->unused_refres_delta =
3177 (int64_t)MIN(csa->ohds->ds_reserved,
3178 csa->ohds->ds_phys->ds_unique_bytes) -
3179 (int64_t)MIN(csa->ohds->ds_reserved,
3180 csa->cds->ds_phys->ds_unique_bytes);
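/*
 * Hypothetical figures: refreservation=10G, head unique=2G, clone
 * unique=5G gives delta = 2G - 5G = -3G; after the swap 3G more of
 * the reservation is consumed by unique data, so the sync func
 * reduces the DD_USED_REFRSRV charge by 3G.
 */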
3181
3182 if (csa->unused_refres_delta > 0 &&
3183 csa->unused_refres_delta >
3184 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3185 return (ENOSPC);
3186
3187 if (csa->ohds->ds_quota != 0 &&
3188 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3189 return (EDQUOT);
3190
3191 return (0);
3192 }
3193
3194 /* ARGSUSED */
3195 static void
3196 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3197 {
3198 struct cloneswaparg *csa = arg1;
3199 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3200
3201 ASSERT(csa->cds->ds_reserved == 0);
3202 ASSERT(csa->ohds->ds_quota == 0 ||
3203 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3204
3205 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3206 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3207
3208 if (csa->cds->ds_objset != NULL) {
3209 dmu_objset_evict(csa->cds->ds_objset);
3210 csa->cds->ds_objset = NULL;
3211 }
3212
3213 if (csa->ohds->ds_objset != NULL) {
3214 dmu_objset_evict(csa->ohds->ds_objset);
3215 csa->ohds->ds_objset = NULL;
3216 }
3217
3218 /*
3219 * Reset origin's unique bytes, if it exists.
3220 */
3221 if (csa->cds->ds_prev) {
3222 dsl_dataset_t *origin = csa->cds->ds_prev;
3223 uint64_t comp, uncomp;
3224
3225 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3226 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3227 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3228 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3229 }
3230
3231 /* swap blkptrs */
3232 {
3233 blkptr_t tmp;
3234 tmp = csa->ohds->ds_phys->ds_bp;
3235 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3236 csa->cds->ds_phys->ds_bp = tmp;
3237 }
3238
3239 /* set dd_*_bytes */
3240 {
3241 int64_t dused, dcomp, duncomp;
3242 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3243 uint64_t odl_used, odl_comp, odl_uncomp;
3244
3245 ASSERT3U(csa->cds->ds_dir->dd_phys->
3246 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3247
3248 dsl_deadlist_space(&csa->cds->ds_deadlist,
3249 &cdl_used, &cdl_comp, &cdl_uncomp);
3250 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3251 &odl_used, &odl_comp, &odl_uncomp);
3252
3253 dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
3254 (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
3255 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3256 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3257 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3258 cdl_uncomp -
3259 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3260
3261 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3262 dused, dcomp, duncomp, tx);
3263 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3264 -dused, -dcomp, -duncomp, tx);
3265
3266 /*
3267 * The difference in the space used by snapshots is the
3268 * difference in snapshot space due to the head's
3269 * deadlist (since that's the only thing that's
3270 * changing that affects the snapused).
3271 */
3272 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3273 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3274 &cdl_used, &cdl_comp, &cdl_uncomp);
3275 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3276 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3277 &odl_used, &odl_comp, &odl_uncomp);
3278 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3279 DD_USED_HEAD, DD_USED_SNAP, tx);
3280 }
3281
3282 /* swap ds_*_bytes */
3283 SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
3284 csa->cds->ds_phys->ds_referenced_bytes);
3285 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3286 csa->cds->ds_phys->ds_compressed_bytes);
3287 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3288 csa->cds->ds_phys->ds_uncompressed_bytes);
3289 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3290 csa->cds->ds_phys->ds_unique_bytes);
3291
3292 /* apply any parent delta for change in unconsumed refreservation */
3293 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3294 csa->unused_refres_delta, 0, 0, tx);
3295
3296 /*
3297 * Swap deadlists.
3298 */
3299 dsl_deadlist_close(&csa->cds->ds_deadlist);
3300 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3301 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3302 csa->cds->ds_phys->ds_deadlist_obj);
3303 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3304 csa->cds->ds_phys->ds_deadlist_obj);
3305 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3306 csa->ohds->ds_phys->ds_deadlist_obj);
3307
3308 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3309 }
3310
3311 /*
3312 * Swap 'clone' with its origin head datasets. Used at the end of "zfs
3313 * recv" into an existing fs to swizzle the file system to the new
3314 * version, and by "zfs rollback". Can also be used to swap two
3315 * independent head datasets if neither has any snapshots.
3316 */
3317 int
3318 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3319 boolean_t force)
3320 {
3321 struct cloneswaparg csa;
3322 int error;
3323
3324 ASSERT(clone->ds_owner);
3325 ASSERT(origin_head->ds_owner);
3326 retry:
3327 /*
3328 * Need exclusive access for the swap. If we're swapping these
3329 * datasets back after an error, we already hold the locks.
3330 */
3331 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3332 rw_enter(&clone->ds_rwlock, RW_WRITER);
3333 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3334 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3335 rw_exit(&clone->ds_rwlock);
3336 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3337 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3338 rw_exit(&origin_head->ds_rwlock);
3339 goto retry;
3340 }
3341 }
3342 csa.cds = clone;
3343 csa.ohds = origin_head;
3344 csa.force = force;
3345 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3346 dsl_dataset_clone_swap_check,
3347 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3348 return (error);
3349 }
3350
3351 /*
3352 * Given a pool name and a dataset object number in that pool,
3353 * return the name of that dataset.
3354 */
3355 int
3356 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3357 {
3358 spa_t *spa;
3359 dsl_pool_t *dp;
3360 dsl_dataset_t *ds;
3361 int error;
3362
3363 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3364 return (error);
3365 dp = spa_get_dsl(spa);
3366 rw_enter(&dp->dp_config_rwlock, RW_READER);
3367 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3368 dsl_dataset_name(ds, buf);
3369 dsl_dataset_rele(ds, FTAG);
3370 }
3371 rw_exit(&dp->dp_config_rwlock);
3372 spa_close(spa, FTAG);
3373
3374 return (error);
3375 }
3376
3377 int
3378 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3379 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3380 {
3381 int error = 0;
3382
3383 ASSERT3S(asize, >, 0);
3384
3385 /*
3386 * *ref_rsrv is the portion of asize that will come from any
3387 * unconsumed refreservation space.
3388 */
3389 *ref_rsrv = 0;
3390
3391 mutex_enter(&ds->ds_lock);
3392 /*
3393 * Make a space adjustment for reserved bytes.
3394 */
3395 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3396 ASSERT3U(*used, >=,
3397 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3398 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3399 *ref_rsrv =
3400 asize - MIN(asize, parent_delta(ds, asize + inflight));
3401 }
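/*
 * The parent_delta() call above reports how much of (asize + inflight)
 * would actually reach the parent; whatever the refreservation absorbs
 * is returned in *ref_rsrv. E.g. (hypothetically) writing 1G with 3G
 * of reservation headroom charges the parent nothing, so *ref_rsrv = 1G.
 */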
3402
3403 if (!check_quota || ds->ds_quota == 0) {
3404 mutex_exit(&ds->ds_lock);
3405 return (0);
3406 }
3407 /*
3408 * If they are requesting more space, and our current estimate
3409 * is over quota, they get to try again unless the actual
3410 * on-disk is over quota and there are no pending changes (which
3411 * may free up space for us).
3412 */
3413 if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
3414 if (inflight > 0 ||
3415 ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
3416 error = ERESTART;
3417 else
3418 error = EDQUOT;
3419
3420 DMU_TX_STAT_BUMP(dmu_tx_quota);
3421 }
3422 mutex_exit(&ds->ds_lock);
3423
3424 return (error);
3425 }
3426
3427 /* ARGSUSED */
3428 static int
3429 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3430 {
3431 dsl_dataset_t *ds = arg1;
3432 dsl_prop_setarg_t *psa = arg2;
3433 int err;
3434
3435 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3436 return (ENOTSUP);
3437
3438 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3439 return (err);
3440
3441 if (psa->psa_effective_value == 0)
3442 return (0);
3443
3444 if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
3445 psa->psa_effective_value < ds->ds_reserved)
3446 return (ENOSPC);
3447
3448 return (0);
3449 }
3450
3451 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3452
3453 void
3454 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3455 {
3456 dsl_dataset_t *ds = arg1;
3457 dsl_prop_setarg_t *psa = arg2;
3458 uint64_t effective_value = psa->psa_effective_value;
3459
3460 dsl_prop_set_sync(ds, psa, tx);
3461 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3462
3463 if (ds->ds_quota != effective_value) {
3464 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3465 ds->ds_quota = effective_value;
3466 }
3467 }
3468
3469 int
3470 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3471 {
3472 dsl_dataset_t *ds;
3473 dsl_prop_setarg_t psa;
3474 int err;
3475
3476 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3477
3478 err = dsl_dataset_hold(dsname, FTAG, &ds);
3479 if (err)
3480 return (err);
3481
3482 /*
3483 * If someone removes a file, then tries to set the quota, we
3484 * want to make sure the file freeing takes effect.
3485 */
3486 txg_wait_open(ds->ds_dir->dd_pool, 0);
3487
3488 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3489 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3490 ds, &psa, 0);
3491
3492 dsl_dataset_rele(ds, FTAG);
3493 return (err);
3494 }
3495
3496 static int
3497 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3498 {
3499 dsl_dataset_t *ds = arg1;
3500 dsl_prop_setarg_t *psa = arg2;
3501 uint64_t effective_value;
3502 uint64_t unique;
3503 int err;
3504
3505 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3506 SPA_VERSION_REFRESERVATION)
3507 return (ENOTSUP);
3508
3509 if (dsl_dataset_is_snapshot(ds))
3510 return (EINVAL);
3511
3512 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3513 return (err);
3514
3515 effective_value = psa->psa_effective_value;
3516
3517 /*
3518 * If we are doing the preliminary check in open context, the
3519 * space estimates may be inaccurate.
3520 */
3521 if (!dmu_tx_is_syncing(tx))
3522 return (0);
3523
3524 mutex_enter(&ds->ds_lock);
3525 if (!DS_UNIQUE_IS_ACCURATE(ds))
3526 dsl_dataset_recalc_head_uniq(ds);
3527 unique = ds->ds_phys->ds_unique_bytes;
3528 mutex_exit(&ds->ds_lock);
3529
3530 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3531 uint64_t delta = MAX(unique, effective_value) -
3532 MAX(unique, ds->ds_reserved);
3533
3534 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3535 return (ENOSPC);
3536 if (ds->ds_quota > 0 &&
3537 effective_value > ds->ds_quota)
3538 return (ENOSPC);
3539 }
3540
3541 return (0);
3542 }
3543
3544 static void
3545 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3546 {
3547 dsl_dataset_t *ds = arg1;
3548 dsl_prop_setarg_t *psa = arg2;
3549 uint64_t effective_value = psa->psa_effective_value;
3550 uint64_t unique;
3551 int64_t delta;
3552
3553 dsl_prop_set_sync(ds, psa, tx);
3554 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3555
3556 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3557
3558 mutex_enter(&ds->ds_dir->dd_lock);
3559 mutex_enter(&ds->ds_lock);
3560 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3561 unique = ds->ds_phys->ds_unique_bytes;
3562 delta = MAX(0, (int64_t)(effective_value - unique)) -
3563 MAX(0, (int64_t)(ds->ds_reserved - unique));
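/*
 * delta is the change in the unconsumed portion of the reservation.
 * Hypothetical figures: raising refreservation from 4G to 10G with 6G
 * of unique data gives delta = MAX(0, 10G - 6G) - MAX(0, 4G - 6G) = 4G,
 * charged to DD_USED_REFRSRV below.
 */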
3564 ds->ds_reserved = effective_value;
3565 mutex_exit(&ds->ds_lock);
3566
3567 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3568 mutex_exit(&ds->ds_dir->dd_lock);
3569 }
3570
3571 int
3572 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3573 uint64_t reservation)
3574 {
3575 dsl_dataset_t *ds;
3576 dsl_prop_setarg_t psa;
3577 int err;
3578
3579 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3580 &reservation);
3581
3582 err = dsl_dataset_hold(dsname, FTAG, &ds);
3583 if (err)
3584 return (err);
3585
3586 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3587 dsl_dataset_set_reservation_check,
3588 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3589
3590 dsl_dataset_rele(ds, FTAG);
3591 return (err);
3592 }
3593
3594 typedef struct zfs_hold_cleanup_arg {
3595 dsl_pool_t *dp;
3596 uint64_t dsobj;
3597 char htag[MAXNAMELEN];
3598 } zfs_hold_cleanup_arg_t;
3599
3600 static void
3601 dsl_dataset_user_release_onexit(void *arg)
3602 {
3603 zfs_hold_cleanup_arg_t *ca = arg;
3604
3605 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3606 B_TRUE);
3607 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3608 }
3609
3610 void
3611 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3612 minor_t minor)
3613 {
3614 zfs_hold_cleanup_arg_t *ca;
3615
3616 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3617 ca->dp = ds->ds_dir->dd_pool;
3618 ca->dsobj = ds->ds_object;
3619 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3620 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3621 dsl_dataset_user_release_onexit, ca, NULL));
3622 }
3623
3624 /*
3625 * If you add new checks here, you may need to add
3626 * additional checks to the "temporary" case in
3627 * snapshot_check() in dmu_objset.c.
3628 */
3629 static int
3630 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3631 {
3632 dsl_dataset_t *ds = arg1;
3633 struct dsl_ds_holdarg *ha = arg2;
3634 char *htag = ha->htag;
3635 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3636 int error = 0;
3637
3638 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3639 return (ENOTSUP);
3640
3641 if (!dsl_dataset_is_snapshot(ds))
3642 return (EINVAL);
3643
3644 /* tags must be unique */
3645 mutex_enter(&ds->ds_lock);
3646 if (ds->ds_phys->ds_userrefs_obj) {
3647 uint64_t tmp; /* scratch value; zap_lookup() writes the stored timestamp here */
3648 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag, 8, 1, &tmp);
3649 if (error == 0)
3650 error = EEXIST;
3651 else if (error == ENOENT)
3652 error = 0;
3653 }
3654 mutex_exit(&ds->ds_lock);
3655
3656 if (error == 0 && ha->temphold &&
3657 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3658 error = E2BIG;
3659
3660 return (error);
3661 }
3662
3663 void
3664 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3665 {
3666 dsl_dataset_t *ds = arg1;
3667 struct dsl_ds_holdarg *ha = arg2;
3668 char *htag = ha->htag;
3669 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3670 objset_t *mos = dp->dp_meta_objset;
3671 uint64_t now = gethrestime_sec();
3672 uint64_t zapobj;
3673
3674 mutex_enter(&ds->ds_lock);
3675 if (ds->ds_phys->ds_userrefs_obj == 0) {
3676 /*
3677 * This is the first user hold for this dataset. Create
3678 * the userrefs zap object.
3679 */
3680 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3681 zapobj = ds->ds_phys->ds_userrefs_obj =
3682 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3683 } else {
3684 zapobj = ds->ds_phys->ds_userrefs_obj;
3685 }
3686 ds->ds_userrefs++;
3687 mutex_exit(&ds->ds_lock);
3688
3689 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3690
3691 if (ha->temphold) {
3692 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3693 htag, &now, tx));
3694 }
3695
3696 spa_history_log_internal(LOG_DS_USER_HOLD,
3697 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3698 (int)ha->temphold, ds->ds_object);
3699 }
3700
3701 static int
3702 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3703 {
3704 struct dsl_ds_holdarg *ha = arg;
3705 dsl_dataset_t *ds;
3706 int error;
3707 char *name;
3708
3709 /* alloc a buffer to hold dsname@snapname plus the terminating NUL */
3710 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3711 error = dsl_dataset_hold(name, ha->dstg, &ds);
3712 strfree(name);
3713 if (error == 0) {
3714 ha->gotone = B_TRUE;
3715 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3716 dsl_dataset_user_hold_sync, ds, ha, 0);
3717 } else if (error == ENOENT && ha->recursive) {
3718 error = 0;
3719 } else {
3720 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3721 }
3722 return (error);
3723 }
3724
3725 int
3726 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3727 boolean_t temphold)
3728 {
3729 struct dsl_ds_holdarg *ha;
3730 int error;
3731
3732 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3733 ha->htag = htag;
3734 ha->temphold = temphold;
3735 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3736 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3737 ds, ha, 0);
3738 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3739
3740 return (error);
3741 }
3742
3743 int
3744 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3745 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3746 {
3747 struct dsl_ds_holdarg *ha;
3748 dsl_sync_task_t *dst;
3749 spa_t *spa;
3750 int error;
3751 minor_t minor = 0;
3752
3753 if (cleanup_fd != -1) {
3754 /* Currently we only support cleanup-on-exit of tempholds. */
3755 if (!temphold)
3756 return (EINVAL);
3757 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3758 if (error)
3759 return (error);
3760 }
3761
3762 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3763
3764 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3765
3766 error = spa_open(dsname, &spa, FTAG);
3767 if (error) {
3768 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3769 if (cleanup_fd != -1)
3770 zfs_onexit_fd_rele(cleanup_fd);
3771 return (error);
3772 }
3773
3774 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3775 ha->htag = htag;
3776 ha->snapname = snapname;
3777 ha->recursive = recursive;
3778 ha->temphold = temphold;
3779
3780 if (recursive) {
3781 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3782 ha, DS_FIND_CHILDREN);
3783 } else {
3784 error = dsl_dataset_user_hold_one(dsname, ha);
3785 }
3786 if (error == 0)
3787 error = dsl_sync_task_group_wait(ha->dstg);
3788
3789 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3790 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3791 dsl_dataset_t *ds = dst->dst_arg1;
3792
3793 if (dst->dst_err) {
3794 dsl_dataset_name(ds, ha->failed);
3795 *strchr(ha->failed, '@') = '\0';
3796 } else if (error == 0 && minor != 0 && temphold) {
3797 /*
3798 * If this hold is to be released upon process exit,
3799 * register that action now.
3800 */
3801 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3802 }
3803 dsl_dataset_rele(ds, ha->dstg);
3804 }
3805
3806 if (error == 0 && recursive && !ha->gotone)
3807 error = ENOENT;
3808
3809 if (error)
3810 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3811
3812 dsl_sync_task_group_destroy(ha->dstg);
3813
3814 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3815 spa_close(spa, FTAG);
3816 if (cleanup_fd != -1)
3817 zfs_onexit_fd_rele(cleanup_fd);
3818 return (error);
3819 }
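
/*
 * A minimal editorial sketch, not part of the original file: placing a
 * recursive temporary hold that is released automatically when the
 * process owning cleanup_fd exits, via the onexit registration above.
 * The pool, snapshot, and tag names are hypothetical.
 */
static int
example_temp_hold(int cleanup_fd)
{
	char dsname[MAXNAMELEN] = "pool/fs";

	/* on failure, dsname is overwritten with the name that failed */
	return (dsl_dataset_user_hold(dsname, "mysnap", ".send-12345",
	    B_TRUE /* recursive */, B_TRUE /* temphold */, cleanup_fd));
}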
3820
3821 struct dsl_ds_releasearg {
3822 dsl_dataset_t *ds;
3823 const char *htag;
3824 boolean_t own; /* do we own or just hold ds? */
3825 };
3826
3827 static int
3828 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3829 boolean_t *might_destroy)
3830 {
3831 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3832 uint64_t zapobj;
3833 uint64_t tmp;
3834 int error;
3835
3836 *might_destroy = B_FALSE;
3837
3838 mutex_enter(&ds->ds_lock);
3839 zapobj = ds->ds_phys->ds_userrefs_obj;
3840 if (zapobj == 0) {
3841 /* The tag can't possibly exist */
3842 mutex_exit(&ds->ds_lock);
3843 return (ESRCH);
3844 }
3845
3846 /* Make sure the tag exists */
3847 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3848 if (error) {
3849 mutex_exit(&ds->ds_lock);
3850 if (error == ENOENT)
3851 error = ESRCH;
3852 return (error);
3853 }
3854
3855 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3856 DS_IS_DEFER_DESTROY(ds))
3857 *might_destroy = B_TRUE;
3858
3859 mutex_exit(&ds->ds_lock);
3860 return (0);
3861 }
3862
3863 static int
3864 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3865 {
3866 struct dsl_ds_releasearg *ra = arg1;
3867 dsl_dataset_t *ds = ra->ds;
3868 boolean_t might_destroy;
3869 int error;
3870
3871 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3872 return (ENOTSUP);
3873
3874 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3875 if (error)
3876 return (error);
3877
3878 if (might_destroy) {
3879 struct dsl_ds_destroyarg dsda = {0};
3880
3881 if (dmu_tx_is_syncing(tx)) {
3882 /*
3883 * If we're not prepared to remove the snapshot,
3884 * we can't allow the release to happen right now.
3885 */
3886 if (!ra->own)
3887 return (EBUSY);
3888 }
3889 dsda.ds = ds;
3890 dsda.releasing = B_TRUE;
3891 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3892 }
3893
3894 return (0);
3895 }
3896
3897 static void
3898 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3899 {
3900 struct dsl_ds_releasearg *ra = arg1;
3901 dsl_dataset_t *ds = ra->ds;
3902 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3903 objset_t *mos = dp->dp_meta_objset;
3904 uint64_t zapobj;
3905 uint64_t dsobj = ds->ds_object;
3906 uint64_t refs;
3907 int error;
3908
3909 mutex_enter(&ds->ds_lock);
3910 ds->ds_userrefs--;
3911 refs = ds->ds_userrefs;
3912 mutex_exit(&ds->ds_lock);
3913 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3914 VERIFY(error == 0 || error == ENOENT);
3915 zapobj = ds->ds_phys->ds_userrefs_obj;
3916 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3917
3918 spa_history_log_internal(LOG_DS_USER_RELEASE,
3919 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3920 ra->htag, (longlong_t)refs, dsobj);
3921
3922 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3923 DS_IS_DEFER_DESTROY(ds)) {
3924 struct dsl_ds_destroyarg dsda = {0};
3925
3926 ASSERT(ra->own);
3927 dsda.ds = ds;
3928 dsda.releasing = B_TRUE;
3929 /* We already did the destroy_check */
3930 dsl_dataset_destroy_sync(&dsda, tag, tx);
3931 }
3932 }
3933
3934 static int
3935 dsl_dataset_user_release_one(const char *dsname, void *arg)
3936 {
3937 struct dsl_ds_holdarg *ha = arg;
3938 struct dsl_ds_releasearg *ra;
3939 dsl_dataset_t *ds;
3940 int error;
3941 void *dtag = ha->dstg;
3942 char *name;
3943 boolean_t own = B_FALSE;
3944 boolean_t might_destroy;
3945
3946 /* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3947 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3948 error = dsl_dataset_hold(name, dtag, &ds);
3949 strfree(name);
3950 if (error == ENOENT && ha->recursive)
3951 return (0);
3952 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3953 if (error)
3954 return (error);
3955
3956 ha->gotone = B_TRUE;
3957
3958 ASSERT(dsl_dataset_is_snapshot(ds));
3959
3960 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3961 if (error) {
3962 dsl_dataset_rele(ds, dtag);
3963 return (error);
3964 }
3965
3966 if (might_destroy) {
3967 #ifdef _KERNEL
3968 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3969 error = zfs_unmount_snap(name, NULL);
3970 strfree(name);
3971 if (error) {
3972 dsl_dataset_rele(ds, dtag);
3973 return (error);
3974 }
3975 #endif
3976 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3977 dsl_dataset_rele(ds, dtag);
3978 return (EBUSY);
3979 } else {
3980 own = B_TRUE;
3981 dsl_dataset_make_exclusive(ds, dtag);
3982 }
3983 }
3984
3985 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3986 ra->ds = ds;
3987 ra->htag = ha->htag;
3988 ra->own = own;
3989 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3990 dsl_dataset_user_release_sync, ra, dtag, 0);
3991
3992 return (0);
3993 }
3994
3995 int
3996 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3997 boolean_t recursive)
3998 {
3999 struct dsl_ds_holdarg *ha;
4000 dsl_sync_task_t *dst;
4001 spa_t *spa;
4002 int error;
4003
4004 top:
4005 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
4006
4007 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
4008
4009 error = spa_open(dsname, &spa, FTAG);
4010 if (error) {
4011 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4012 return (error);
4013 }
4014
4015 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
4016 ha->htag = htag;
4017 ha->snapname = snapname;
4018 ha->recursive = recursive;
4019 if (recursive) {
4020 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
4021 ha, DS_FIND_CHILDREN);
4022 } else {
4023 error = dsl_dataset_user_release_one(dsname, ha);
4024 }
4025 if (error == 0)
4026 error = dsl_sync_task_group_wait(ha->dstg);
4027
4028 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
4029 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
4030 struct dsl_ds_releasearg *ra = dst->dst_arg1;
4031 dsl_dataset_t *ds = ra->ds;
4032
4033 if (dst->dst_err)
4034 dsl_dataset_name(ds, ha->failed);
4035
4036 if (ra->own)
4037 dsl_dataset_disown(ds, ha->dstg);
4038 else
4039 dsl_dataset_rele(ds, ha->dstg);
4040
4041 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
4042 }
4043
4044 if (error == 0 && recursive && !ha->gotone)
4045 error = ENOENT;
4046
4047 if (error && error != EBUSY)
4048 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
4049
4050 dsl_sync_task_group_destroy(ha->dstg);
4051 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4052 spa_close(spa, FTAG);
4053
4054 /*
4055 * We can get EBUSY if we were racing with deferred destroy and
4056 * dsl_dataset_user_release_check() hadn't done the necessary
4057 * open context setup. We can also get EBUSY if we're racing
4058 * with destroy and that thread is the ds_owner. Either way
4059 * the busy condition should be transient, and we should retry
4060 * the release operation.
4061 */
4062 if (error == EBUSY)
4063 goto top;
4064
4065 return (error);
4066 }
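
/*
 * A minimal editorial sketch, not part of the original file: releasing
 * the hold placed in the example after dsl_dataset_user_hold().  An
 * EBUSY race is retried internally (see the comment above), so the
 * caller only observes a settled error code.
 */
static int
example_release_hold(void)
{
	char dsname[MAXNAMELEN] = "pool/fs";

	return (dsl_dataset_user_release(dsname, "mysnap", ".send-12345",
	    B_TRUE /* recursive */));
}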
4067
4068 /*
4069 * Called at spa_load time (with retry == B_FALSE) to release a stale
4070 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4071 */
4072 int
4073 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4074 boolean_t retry)
4075 {
4076 dsl_dataset_t *ds;
4077 char *snap;
4078 char *name;
4079 int namelen;
4080 int error;
4081
4082 do {
4083 rw_enter(&dp->dp_config_rwlock, RW_READER);
4084 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4085 rw_exit(&dp->dp_config_rwlock);
4086 if (error)
4087 return (error);
4088 namelen = dsl_dataset_namelen(ds)+1;
4089 name = kmem_alloc(namelen, KM_SLEEP);
4090 dsl_dataset_name(ds, name);
4091 dsl_dataset_rele(ds, FTAG);
4092
4093 snap = strchr(name, '@');
4094 *snap = '\0';
4095 ++snap;
4096 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4097 kmem_free(name, namelen);
4098
4099 /*
4100 * The object can't have been destroyed because we have a hold,
4101 * but it might have been renamed, resulting in ENOENT. Retry
4102 * if we've been requested to do so.
4103 *
4104 * It would be nice if we could use the dsobj all the way
4105 * through and avoid ENOENT entirely. But we might need to
4106 unmount the snapshot, and there's currently no way to look up
4107 * a vfsp using a ZFS object id.
4108 */
4109 } while ((error == ENOENT) && retry);
4110
4111 return (error);
4112 }
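
/*
 * A minimal editorial sketch, not part of the original file: how a
 * pool-load style cleanup might drive this function for one stale
 * entry found in the pool's temporary-userrefs ZAP.  The ZAP iteration
 * is elided and the tag is hypothetical; retry is B_FALSE because, as
 * noted above, only the onexit path needs to retry.
 */
static void
example_release_stale_hold(dsl_pool_t *dp, uint64_t dsobj)
{
	(void) dsl_dataset_user_release_tmp(dp, dsobj, ".send-123", B_FALSE);
}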
4113
4114 int
4115 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4116 {
4117 dsl_dataset_t *ds;
4118 int err;
4119
4120 err = dsl_dataset_hold(dsname, FTAG, &ds);
4121 if (err)
4122 return (err);
4123
4124 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4125 if (ds->ds_phys->ds_userrefs_obj != 0) {
4126 zap_attribute_t *za;
4127 zap_cursor_t zc;
4128
4129 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4130 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4131 ds->ds_phys->ds_userrefs_obj);
4132 zap_cursor_retrieve(&zc, za) == 0;
4133 zap_cursor_advance(&zc)) {
4134 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4135 za->za_first_integer));
4136 }
4137 zap_cursor_fini(&zc);
4138 kmem_free(za, sizeof (zap_attribute_t));
4139 }
4140 dsl_dataset_rele(ds, FTAG);
4141 return (0);
4142 }
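
/*
 * A minimal editorial sketch, not part of the original file: consuming
 * the nvlist built above.  Each pair maps a hold tag to the time (in
 * seconds since the epoch) at which the hold was taken.
 */
static void
example_print_holds(const char *dsname)
{
	nvlist_t *nvl;
	nvpair_t *pair;

	if (dsl_dataset_get_holds(dsname, &nvl) != 0)
		return;
	for (pair = nvlist_next_nvpair(nvl, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(nvl, pair)) {
		uint64_t when;

		VERIFY(0 == nvpair_value_uint64(pair, &when));
		dprintf("hold <%s> since %llu\n", nvpair_name(pair),
		    (u_longlong_t)when);
	}
	nvlist_free(nvl);
}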
4143
4144 /*
4145 * Note, this function is used as the callback for dmu_objset_find(). We
4146 * always return 0 so that we will continue to find and process
4147 * inconsistent datasets, even if we encounter an error trying to
4148 * process one of them.
4149 */
4150 /* ARGSUSED */
4151 int
4152 dsl_destroy_inconsistent(const char *dsname, void *arg)
4153 {
4154 dsl_dataset_t *ds;
4155
4156 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4157 if (DS_IS_INCONSISTENT(ds))
4158 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4159 else
4160 dsl_dataset_disown(ds, FTAG);
4161 }
4162 return (0);
4163 }
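
/*
 * A minimal editorial sketch, not part of the original file: the
 * callback above is meant to be driven over every dataset in a pool
 * with dmu_objset_find(), e.g. during pool load to clean up datasets
 * left inconsistent by an interrupted receive or destroy.
 */
static void
example_destroy_inconsistent(spa_t *spa)
{
	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
	    NULL, DS_FIND_CHILDREN);
}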
4164
4166 /*
4167 * Return (in *usedp) the amount of space written in new that is not
4168 * present in oldsnap. New may be a snapshot or the head. Old must be
4169 * a snapshot before new, in new's filesystem (or its origin). If not then
4170 * fail and return EINVAL.
4171 *
4172 * The written space is calculated by considering two components: First, we
4173 * ignore any freed space, and calculate the written as new's used space
4174 * minus old's used space. Next, we add in the amount of space that was freed
4175 * between the two snapshots, thus reducing new's used space relative to old's.
4176 * Specifically, this is the space that was born before old->ds_creation_txg,
4177 * and freed before new (ie. on new's deadlist or a previous deadlist).
4178 *
4179 * space freed                         [---------------------]
4180 * snapshots                       ---O-------O--------O-------O------
4181 *                                  oldsnap            new
4182 */
4183 int
4184 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4185 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4186 {
4187 int err = 0;
4188 uint64_t snapobj;
4189 dsl_pool_t *dp = new->ds_dir->dd_pool;
4190
4191 *usedp = 0;
4192 *usedp += new->ds_phys->ds_referenced_bytes;
4193 *usedp -= oldsnap->ds_phys->ds_referenced_bytes;
4194
4195 *compp = 0;
4196 *compp += new->ds_phys->ds_compressed_bytes;
4197 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4198
4199 *uncompp = 0;
4200 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4201 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4202
4203 rw_enter(&dp->dp_config_rwlock, RW_READER);
4204 snapobj = new->ds_object;
4205 while (snapobj != oldsnap->ds_object) {
4206 dsl_dataset_t *snap;
4207 uint64_t used, comp, uncomp;
4208
4209 if (snapobj == new->ds_object) {
4210 snap = new;
4211 } else {
4212 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4213 if (err != 0)
4214 break;
4215 }
4216
4217 if (snap->ds_phys->ds_prev_snap_txg ==
4218 oldsnap->ds_phys->ds_creation_txg) {
4219 /*
4220 * The blocks in the deadlist cannot be born after
4221 * ds_prev_snap_txg, so get the whole deadlist space,
4222 * which is more efficient (especially for old-format
4223 * deadlists). Unfortunately the deadlist code
4224 * doesn't have enough information to make this
4225 * optimization itself.
4226 */
4227 dsl_deadlist_space(&snap->ds_deadlist,
4228 &used, &comp, &uncomp);
4229 } else {
4230 dsl_deadlist_space_range(&snap->ds_deadlist,
4231 0, oldsnap->ds_phys->ds_creation_txg,
4232 &used, &comp, &uncomp);
4233 }
4234 *usedp += used;
4235 *compp += comp;
4236 *uncompp += uncomp;
4237
4238 /*
4239 * If we get to the beginning of the chain of snapshots
4240 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4241 * was not a snapshot of/before new.
4242 */
4243 snapobj = snap->ds_phys->ds_prev_snap_obj;
4244 if (snap != new)
4245 dsl_dataset_rele(snap, FTAG);
4246 if (snapobj == 0) {
4247 err = EINVAL;
4248 break;
4249 }
4250
4252 rw_exit(&dp->dp_config_rwlock);
4253 return (err);
4254 }
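
/*
 * Worked example (editorial, with made-up numbers): if oldsnap
 * references 100M and new references 150M, the first component is
 * 150M - 100M = 50M.  If a further 30M was born before oldsnap's
 * creation txg but freed between the two snapshots, it sits on a
 * deadlist in the (oldsnap, new] range and is added back, giving
 * *usedp = 50M + 30M = 80M of space written in new that oldsnap
 * does not reference.
 */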
4255
4256 /*
4257 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4258 * lastsnap, and all snapshots in between are deleted.
4259 *
4260 * blocks that would be freed            [---------------------------]
4261 * snapshots                       ---O-------O--------O-------O--------O
4262 *                                        firstsnap        lastsnap
4263 *
4264 * This is the set of blocks that were born after the snap before firstsnap
4265 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4266 * last snap (i.e., on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4267 * We calculate this by iterating over the relevant deadlists (from the snap
4268 * after lastsnap, backward to the snap after firstsnap), summing up the
4269 * space on the deadlist that was born after the snap before firstsnap.
4270 */
4271 int
4272 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4273 dsl_dataset_t *lastsnap,
4274 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4275 {
4276 int err = 0;
4277 uint64_t snapobj;
4278 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4279
4280 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4281 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4282
4283 /*
4284 * Check that the snapshots are in the same dsl_dir, and firstsnap
4285 * is before lastsnap.
4286 */
4287 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4288 firstsnap->ds_phys->ds_creation_txg >
4289 lastsnap->ds_phys->ds_creation_txg)
4290 return (EINVAL);
4291
4292 *usedp = *compp = *uncompp = 0;
4293
4294 rw_enter(&dp->dp_config_rwlock, RW_READER);
4295 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4296 while (snapobj != firstsnap->ds_object) {
4297 dsl_dataset_t *ds;
4298 uint64_t used, comp, uncomp;
4299
4300 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4301 if (err != 0)
4302 break;
4303
4304 dsl_deadlist_space_range(&ds->ds_deadlist,
4305 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4306 &used, &comp, &uncomp);
4307 *usedp += used;
4308 *compp += comp;
4309 *uncompp += uncomp;
4310
4311 snapobj = ds->ds_phys->ds_prev_snap_obj;
4312 ASSERT3U(snapobj, !=, 0);
4313 dsl_dataset_rele(ds, FTAG);
4314 }
4315 rw_exit(&dp->dp_config_rwlock);
4316 return (err);
4317 }
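
/*
 * A minimal editorial sketch, not part of the original file: asking
 * how much space deleting the inclusive snapshot range [first, last]
 * would reclaim.  Both snapshots must already be held by the caller.
 */
static int
example_wouldfree(dsl_dataset_t *first, dsl_dataset_t *last)
{
	uint64_t used, comp, uncomp;
	int err;

	err = dsl_dataset_space_wouldfree(first, last,
	    &used, &comp, &uncomp);
	if (err == 0)
		dprintf("would free %llu bytes\n", (u_longlong_t)used);
	return (err);
}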
4318
4319 #if defined(_KERNEL) && defined(HAVE_SPL)
4320 EXPORT_SYMBOL(dmu_snapshots_destroy_nvl);
4321 EXPORT_SYMBOL(dsl_dataset_hold);
4322 EXPORT_SYMBOL(dsl_dataset_hold_obj);
4323 EXPORT_SYMBOL(dsl_dataset_own);
4324 EXPORT_SYMBOL(dsl_dataset_own_obj);
4325 EXPORT_SYMBOL(dsl_dataset_name);
4326 EXPORT_SYMBOL(dsl_dataset_rele);
4327 EXPORT_SYMBOL(dsl_dataset_disown);
4328 EXPORT_SYMBOL(dsl_dataset_drop_ref);
4329 EXPORT_SYMBOL(dsl_dataset_tryown);
4330 EXPORT_SYMBOL(dsl_dataset_make_exclusive);
4331 EXPORT_SYMBOL(dsl_dataset_create_sync);
4332 EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
4333 EXPORT_SYMBOL(dsl_dataset_destroy);
4334 EXPORT_SYMBOL(dsl_dataset_destroy_check);
4335 EXPORT_SYMBOL(dsl_dataset_destroy_sync);
4336 EXPORT_SYMBOL(dsl_dataset_snapshot_check);
4337 EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
4338 EXPORT_SYMBOL(dsl_dataset_rename);
4339 EXPORT_SYMBOL(dsl_dataset_promote);
4340 EXPORT_SYMBOL(dsl_dataset_clone_swap);
4341 EXPORT_SYMBOL(dsl_dataset_user_hold);
4342 EXPORT_SYMBOL(dsl_dataset_user_release);
4343 EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
4344 EXPORT_SYMBOL(dsl_dataset_get_holds);
4345 EXPORT_SYMBOL(dsl_dataset_get_blkptr);
4346 EXPORT_SYMBOL(dsl_dataset_set_blkptr);
4347 EXPORT_SYMBOL(dsl_dataset_get_spa);
4348 EXPORT_SYMBOL(dsl_dataset_modified_since_lastsnap);
4349 EXPORT_SYMBOL(dsl_dataset_space_written);
4350 EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
4351 EXPORT_SYMBOL(dsl_dataset_sync);
4352 EXPORT_SYMBOL(dsl_dataset_block_born);
4353 EXPORT_SYMBOL(dsl_dataset_block_kill);
4354 EXPORT_SYMBOL(dsl_dataset_block_freeable);
4355 EXPORT_SYMBOL(dsl_dataset_prev_snap_txg);
4356 EXPORT_SYMBOL(dsl_dataset_dirty);
4357 EXPORT_SYMBOL(dsl_dataset_stats);
4358 EXPORT_SYMBOL(dsl_dataset_fast_stat);
4359 EXPORT_SYMBOL(dsl_dataset_space);
4360 EXPORT_SYMBOL(dsl_dataset_fsid_guid);
4361 EXPORT_SYMBOL(dsl_dsobj_to_dsname);
4362 EXPORT_SYMBOL(dsl_dataset_check_quota);
4363 EXPORT_SYMBOL(dsl_dataset_set_quota);
4364 EXPORT_SYMBOL(dsl_dataset_set_quota_sync);
4365 EXPORT_SYMBOL(dsl_dataset_set_reservation);
4366 EXPORT_SYMBOL(dsl_destroy_inconsistent);
4367 #endif