1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include <sys/dmu_objset.h>
27 #include <sys/dsl_dataset.h>
28 #include <sys/dsl_dir.h>
29 #include <sys/dsl_prop.h>
30 #include <sys/dsl_synctask.h>
31 #include <sys/dmu_traverse.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/arc.h>
34 #include <sys/zio.h>
35 #include <sys/zap.h>
36 #include <sys/unique.h>
37 #include <sys/zfs_context.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/spa.h>
40 #include <sys/zfs_znode.h>
41 #include <sys/sunddi.h>
42
43 static char *dsl_reaper = "the grim reaper";
44
45 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
46 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
47 static dsl_checkfunc_t dsl_dataset_rollback_check;
48 static dsl_syncfunc_t dsl_dataset_rollback_sync;
49 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
50
51 #define DS_REF_MAX (1ULL << 62)
52
53 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
54
55 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
56
57
58 /*
59  * Figure out how much of this delta should be propagated to the dsl_dir
60 * layer. If there's a refreservation, that space has already been
61 * partially accounted for in our ancestors.
62 */
63 static int64_t
64 parent_delta(dsl_dataset_t *ds, int64_t delta)
65 {
66 uint64_t old_bytes, new_bytes;
67
68 if (ds->ds_reserved == 0)
69 return (delta);
70
71 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
72 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
73
74 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
75 return (new_bytes - old_bytes);
76 }
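/*
 * Worked example for parent_delta() above (illustrative numbers only):
 * with ds_reserved = 100 and ds_unique_bytes = 80, a delta of +30 moves
 * MAX(unique, reserved) from 100 to 110, so only 10 is propagated to the
 * dsl_dir layer; the first 20 bytes were already accounted for by the
 * refreservation.
 */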
77
78 void
79 dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
80 {
81 int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
82 int compressed = BP_GET_PSIZE(bp);
83 int uncompressed = BP_GET_UCSIZE(bp);
84 int64_t delta;
85
86 dprintf_bp(bp, "born, ds=%p\n", ds);
87
88 ASSERT(dmu_tx_is_syncing(tx));
89 /* It could have been compressed away to nothing */
90 if (BP_IS_HOLE(bp))
91 return;
92 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
93 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
94 if (ds == NULL) {
95 /*
96 * Account for the meta-objset space in its placeholder
97 * dsl_dir.
98 */
99 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
100 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
101 used, compressed, uncompressed, tx);
102 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
103 return;
104 }
105 dmu_buf_will_dirty(ds->ds_dbuf, tx);
106 mutex_enter(&ds->ds_dir->dd_lock);
107 mutex_enter(&ds->ds_lock);
108 delta = parent_delta(ds, used);
109 ds->ds_phys->ds_used_bytes += used;
110 ds->ds_phys->ds_compressed_bytes += compressed;
111 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
112 ds->ds_phys->ds_unique_bytes += used;
113 mutex_exit(&ds->ds_lock);
114 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
115 compressed, uncompressed, tx);
116 dsl_dir_transfer_space(ds->ds_dir, used - delta,
117 DD_USED_REFRSRV, DD_USED_HEAD, tx);
118 mutex_exit(&ds->ds_dir->dd_lock);
119 }
120
121 int
122 dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio,
123 dmu_tx_t *tx)
124 {
125 int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
126 int compressed = BP_GET_PSIZE(bp);
127 int uncompressed = BP_GET_UCSIZE(bp);
128
129 ASSERT(pio != NULL);
130 ASSERT(dmu_tx_is_syncing(tx));
131 /* No block pointer => nothing to free */
132 if (BP_IS_HOLE(bp))
133 return (0);
134
135 ASSERT(used > 0);
136 if (ds == NULL) {
137 int err;
138 /*
139 * Account for the meta-objset space in its placeholder
140 * dataset.
141 */
142 err = dsl_free(pio, tx->tx_pool,
143 tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
144 ASSERT(err == 0);
145
146 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
147 -used, -compressed, -uncompressed, tx);
148 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
149 return (used);
150 }
151 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
152
153 ASSERT(!dsl_dataset_is_snapshot(ds));
154 dmu_buf_will_dirty(ds->ds_dbuf, tx);
155
156 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
157 int err;
158 int64_t delta;
159
160 dprintf_bp(bp, "freeing: %s", "");
161 err = dsl_free(pio, tx->tx_pool,
162 tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
163 ASSERT(err == 0);
164
165 mutex_enter(&ds->ds_dir->dd_lock);
166 mutex_enter(&ds->ds_lock);
167 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
168 !DS_UNIQUE_IS_ACCURATE(ds));
169 delta = parent_delta(ds, -used);
170 ds->ds_phys->ds_unique_bytes -= used;
171 mutex_exit(&ds->ds_lock);
172 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
173 delta, -compressed, -uncompressed, tx);
174 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
175 DD_USED_REFRSRV, DD_USED_HEAD, tx);
176 mutex_exit(&ds->ds_dir->dd_lock);
177 } else {
178 dprintf_bp(bp, "putting on dead list: %s", "");
179 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
180 ASSERT3U(ds->ds_prev->ds_object, ==,
181 ds->ds_phys->ds_prev_snap_obj);
182 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
183 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
184 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
185 ds->ds_object && bp->blk_birth >
186 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
187 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
188 mutex_enter(&ds->ds_prev->ds_lock);
189 ds->ds_prev->ds_phys->ds_unique_bytes += used;
190 mutex_exit(&ds->ds_prev->ds_lock);
191 }
192 if (bp->blk_birth > ds->ds_origin_txg) {
193 dsl_dir_transfer_space(ds->ds_dir, used,
194 DD_USED_HEAD, DD_USED_SNAP, tx);
195 }
196 }
197 mutex_enter(&ds->ds_lock);
198 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
199 ds->ds_phys->ds_used_bytes -= used;
200 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
201 ds->ds_phys->ds_compressed_bytes -= compressed;
202 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
203 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
204 mutex_exit(&ds->ds_lock);
205
206 return (used);
207 }
208
209 uint64_t
210 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
211 {
212 uint64_t trysnap = 0;
213
214 if (ds == NULL)
215 return (0);
216 /*
217 * The snapshot creation could fail, but that would cause an
218 * incorrect FALSE return, which would only result in an
219 * overestimation of the amount of space that an operation would
220 * consume, which is OK.
221 *
222 * There's also a small window where we could miss a pending
223 * snapshot, because we could set the sync task in the quiescing
224 * phase. So this should only be used as a guess.
225 */
226 if (ds->ds_trysnap_txg >
227 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
228 trysnap = ds->ds_trysnap_txg;
229 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
230 }
231
232 int
233 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
234 {
235 return (blk_birth > dsl_dataset_prev_snap_txg(ds));
236 }
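/*
 * Note on the check above (a summary of the logic in
 * dsl_dataset_block_kill()): a block whose blk_birth is at or before
 * ds_prev_snap_txg is still referenced by the previous snapshot, so it
 * is queued on the deadlist rather than freed; blocks born after that
 * txg are unique to this dataset and can be freed immediately.
 */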
237
238 /* ARGSUSED */
239 static void
240 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
241 {
242 dsl_dataset_t *ds = dsv;
243
244 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
245
246 dprintf_ds(ds, "evicting %s\n", "");
247
248 unique_remove(ds->ds_fsid_guid);
249
250 if (ds->ds_user_ptr != NULL)
251 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
252
253 if (ds->ds_prev) {
254 dsl_dataset_drop_ref(ds->ds_prev, ds);
255 ds->ds_prev = NULL;
256 }
257
258 bplist_close(&ds->ds_deadlist);
259 if (ds->ds_dir)
260 dsl_dir_close(ds->ds_dir, ds);
261
262 ASSERT(!list_link_active(&ds->ds_synced_link));
263
264 mutex_destroy(&ds->ds_lock);
265 mutex_destroy(&ds->ds_opening_lock);
266 mutex_destroy(&ds->ds_deadlist.bpl_lock);
267 rw_destroy(&ds->ds_rwlock);
268 cv_destroy(&ds->ds_exclusive_cv);
269
270 kmem_free(ds, sizeof (dsl_dataset_t));
271 }
272
273 static int
274 dsl_dataset_get_snapname(dsl_dataset_t *ds)
275 {
276 dsl_dataset_phys_t *headphys;
277 int err;
278 dmu_buf_t *headdbuf;
279 dsl_pool_t *dp = ds->ds_dir->dd_pool;
280 objset_t *mos = dp->dp_meta_objset;
281
282 if (ds->ds_snapname[0])
283 return (0);
284 if (ds->ds_phys->ds_next_snap_obj == 0)
285 return (0);
286
287 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
288 FTAG, &headdbuf);
289 if (err)
290 return (err);
291 headphys = headdbuf->db_data;
292 err = zap_value_search(dp->dp_meta_objset,
293 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
294 dmu_buf_rele(headdbuf, FTAG);
295 return (err);
296 }
297
298 static int
299 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
300 {
301 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
302 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
303 matchtype_t mt;
304 int err;
305
306 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
307 mt = MT_FIRST;
308 else
309 mt = MT_EXACT;
310
311 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
312 value, mt, NULL, 0, NULL);
313 if (err == ENOTSUP && mt == MT_FIRST)
314 err = zap_lookup(mos, snapobj, name, 8, 1, value);
315 return (err);
316 }
317
318 static int
319 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
320 {
321 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
322 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
323 matchtype_t mt;
324 int err;
325
326 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
327 mt = MT_FIRST;
328 else
329 mt = MT_EXACT;
330
331 err = zap_remove_norm(mos, snapobj, name, mt, tx);
332 if (err == ENOTSUP && mt == MT_FIRST)
333 err = zap_remove(mos, snapobj, name, tx);
334 return (err);
335 }
336
337 static int
338 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
339 dsl_dataset_t **dsp)
340 {
341 objset_t *mos = dp->dp_meta_objset;
342 dmu_buf_t *dbuf;
343 dsl_dataset_t *ds;
344 int err;
345
346 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
347 dsl_pool_sync_context(dp));
348
349 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
350 if (err)
351 return (err);
352 ds = dmu_buf_get_user(dbuf);
353 if (ds == NULL) {
354 dsl_dataset_t *winner;
355
356 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
357 ds->ds_dbuf = dbuf;
358 ds->ds_object = dsobj;
359 ds->ds_phys = dbuf->db_data;
360
361 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
362 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
363 mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
364 NULL);
365 rw_init(&ds->ds_rwlock, 0, 0, 0);
366 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
367
368 err = bplist_open(&ds->ds_deadlist,
369 mos, ds->ds_phys->ds_deadlist_obj);
370 if (err == 0) {
371 err = dsl_dir_open_obj(dp,
372 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
373 }
374 if (err) {
375 /*
376  * we don't really need to close the bplist if we
377 * just opened it.
378 */
379 mutex_destroy(&ds->ds_lock);
380 mutex_destroy(&ds->ds_opening_lock);
381 mutex_destroy(&ds->ds_deadlist.bpl_lock);
382 rw_destroy(&ds->ds_rwlock);
383 cv_destroy(&ds->ds_exclusive_cv);
384 kmem_free(ds, sizeof (dsl_dataset_t));
385 dmu_buf_rele(dbuf, tag);
386 return (err);
387 }
388
389 if (!dsl_dataset_is_snapshot(ds)) {
390 ds->ds_snapname[0] = '\0';
391 if (ds->ds_phys->ds_prev_snap_obj) {
392 err = dsl_dataset_get_ref(dp,
393 ds->ds_phys->ds_prev_snap_obj,
394 ds, &ds->ds_prev);
395 }
396
397 if (err == 0 && dsl_dir_is_clone(ds->ds_dir)) {
398 dsl_dataset_t *origin;
399
400 err = dsl_dataset_hold_obj(dp,
401 ds->ds_dir->dd_phys->dd_origin_obj,
402 FTAG, &origin);
403 if (err == 0) {
404 ds->ds_origin_txg =
405 origin->ds_phys->ds_creation_txg;
406 dsl_dataset_rele(origin, FTAG);
407 }
408 }
409 } else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) {
410 err = dsl_dataset_get_snapname(ds);
411 }
412
413 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
414 /*
415 * In sync context, we're called with either no lock
416 * or with the write lock. If we're not syncing,
417 * we're always called with the read lock held.
418 */
419 boolean_t need_lock =
420 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
421 dsl_pool_sync_context(dp);
422
423 if (need_lock)
424 rw_enter(&dp->dp_config_rwlock, RW_READER);
425
426 err = dsl_prop_get_ds(ds,
427 "refreservation", sizeof (uint64_t), 1,
428 &ds->ds_reserved, NULL);
429 if (err == 0) {
430 err = dsl_prop_get_ds(ds,
431 "refquota", sizeof (uint64_t), 1,
432 &ds->ds_quota, NULL);
433 }
434
435 if (need_lock)
436 rw_exit(&dp->dp_config_rwlock);
437 } else {
438 ds->ds_reserved = ds->ds_quota = 0;
439 }
440
441 if (err == 0) {
442 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
443 dsl_dataset_evict);
444 }
445 if (err || winner) {
446 bplist_close(&ds->ds_deadlist);
447 if (ds->ds_prev)
448 dsl_dataset_drop_ref(ds->ds_prev, ds);
449 dsl_dir_close(ds->ds_dir, ds);
450 mutex_destroy(&ds->ds_lock);
451 mutex_destroy(&ds->ds_opening_lock);
452 mutex_destroy(&ds->ds_deadlist.bpl_lock);
453 rw_destroy(&ds->ds_rwlock);
454 cv_destroy(&ds->ds_exclusive_cv);
455 kmem_free(ds, sizeof (dsl_dataset_t));
456 if (err) {
457 dmu_buf_rele(dbuf, tag);
458 return (err);
459 }
460 ds = winner;
461 } else {
462 ds->ds_fsid_guid =
463 unique_insert(ds->ds_phys->ds_fsid_guid);
464 }
465 }
466 ASSERT3P(ds->ds_dbuf, ==, dbuf);
467 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
468 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
469 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
470 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
471 mutex_enter(&ds->ds_lock);
472 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
473 mutex_exit(&ds->ds_lock);
474 dmu_buf_rele(ds->ds_dbuf, tag);
475 return (ENOENT);
476 }
477 mutex_exit(&ds->ds_lock);
478 *dsp = ds;
479 return (0);
480 }
481
482 static int
483 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
484 {
485 dsl_pool_t *dp = ds->ds_dir->dd_pool;
486
487 /*
488  * In syncing context we don't want the rwlock: there
489 * may be an existing writer waiting for sync phase to
490 * finish. We don't need to worry about such writers, since
491 * sync phase is single-threaded, so the writer can't be
492 * doing anything while we are active.
493 */
494 if (dsl_pool_sync_context(dp)) {
495 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
496 return (0);
497 }
498
499 /*
500 * Normal users will hold the ds_rwlock as a READER until they
501 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
502 * drop their READER lock after they set the ds_owner field.
503 *
504 * If the dataset is being destroyed, the destroy thread will
505 * obtain a WRITER lock for exclusive access after it's done its
506 * open-context work and then change the ds_owner to
507 * dsl_reaper once destruction is assured. So threads
508  * may block here temporarily, until the "destructibility" of
509 * the dataset is determined.
510 */
511 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
512 mutex_enter(&ds->ds_lock);
513 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
514 rw_exit(&dp->dp_config_rwlock);
515 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
516 if (DSL_DATASET_IS_DESTROYED(ds)) {
517 mutex_exit(&ds->ds_lock);
518 dsl_dataset_drop_ref(ds, tag);
519 rw_enter(&dp->dp_config_rwlock, RW_READER);
520 return (ENOENT);
521 }
522 rw_enter(&dp->dp_config_rwlock, RW_READER);
523 }
524 mutex_exit(&ds->ds_lock);
525 return (0);
526 }
527
528 int
529 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
530 dsl_dataset_t **dsp)
531 {
532 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
533
534 if (err)
535 return (err);
536 return (dsl_dataset_hold_ref(*dsp, tag));
537 }
538
539 int
540 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
541 dsl_dataset_t **dsp)
542 {
543 int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);
544
545 ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);
546
547 if (err)
548 return (err);
549 if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
550 dsl_dataset_rele(*dsp, owner);
551 return (EBUSY);
552 }
553 return (0);
554 }
555
556 int
557 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
558 {
559 dsl_dir_t *dd;
560 dsl_pool_t *dp;
561 const char *snapname;
562 uint64_t obj;
563 int err = 0;
564
565 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
566 if (err)
567 return (err);
568
569 dp = dd->dd_pool;
570 obj = dd->dd_phys->dd_head_dataset_obj;
571 rw_enter(&dp->dp_config_rwlock, RW_READER);
572 if (obj)
573 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
574 else
575 err = ENOENT;
576 if (err)
577 goto out;
578
579 err = dsl_dataset_hold_ref(*dsp, tag);
580
581 /* we may be looking for a snapshot */
582 if (err == 0 && snapname != NULL) {
583 dsl_dataset_t *ds = NULL;
584
585 if (*snapname++ != '@') {
586 dsl_dataset_rele(*dsp, tag);
587 err = ENOENT;
588 goto out;
589 }
590
591 dprintf("looking for snapshot '%s'\n", snapname);
592 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
593 if (err == 0)
594 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
595 dsl_dataset_rele(*dsp, tag);
596
597 ASSERT3U((err == 0), ==, (ds != NULL));
598
599 if (ds) {
600 mutex_enter(&ds->ds_lock);
601 if (ds->ds_snapname[0] == 0)
602 (void) strlcpy(ds->ds_snapname, snapname,
603 sizeof (ds->ds_snapname));
604 mutex_exit(&ds->ds_lock);
605 err = dsl_dataset_hold_ref(ds, tag);
606 *dsp = err ? NULL : ds;
607 }
608 }
609 out:
610 rw_exit(&dp->dp_config_rwlock);
611 dsl_dir_close(dd, FTAG);
612 return (err);
613 }
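/*
 * Illustrative caller pattern for the hold/rele interface above (a
 * sketch only; the dataset name and error handling are hypothetical):
 *
 *	dsl_dataset_t *ds;
 *	int err = dsl_dataset_hold("tank/fs@snap", FTAG, &ds);
 *	if (err == 0) {
 *		...  read-only use of ds  ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 */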
614
615 int
616 dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp)
617 {
618 int err = dsl_dataset_hold(name, owner, dsp);
619 if (err)
620 return (err);
621 if ((*dsp)->ds_phys->ds_num_children > 0 &&
622 !DS_MODE_IS_READONLY(flags)) {
623 dsl_dataset_rele(*dsp, owner);
624 return (EROFS);
625 }
626 if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
627 dsl_dataset_rele(*dsp, owner);
628 return (EBUSY);
629 }
630 return (0);
631 }
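/*
 * Illustrative owner pattern (a sketch; the name and flags value are
 * examples): owning prevents anyone else from owning the dataset until
 * the owner disowns it, and the same owner tag must be used for both.
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_own("tank/fs", DS_MODE_READONLY, FTAG, &ds) == 0) {
 *		...  use of the owned dataset  ...
 *		dsl_dataset_disown(ds, FTAG);
 *	}
 */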
632
633 void
634 dsl_dataset_name(dsl_dataset_t *ds, char *name)
635 {
636 if (ds == NULL) {
637 (void) strcpy(name, "mos");
638 } else {
639 dsl_dir_name(ds->ds_dir, name);
640 VERIFY(0 == dsl_dataset_get_snapname(ds));
641 if (ds->ds_snapname[0]) {
642 (void) strcat(name, "@");
643 /*
644 * We use a "recursive" mutex so that we
645 * can call dprintf_ds() with ds_lock held.
646 */
647 if (!MUTEX_HELD(&ds->ds_lock)) {
648 mutex_enter(&ds->ds_lock);
649 (void) strcat(name, ds->ds_snapname);
650 mutex_exit(&ds->ds_lock);
651 } else {
652 (void) strcat(name, ds->ds_snapname);
653 }
654 }
655 }
656 }
657
658 static int
659 dsl_dataset_namelen(dsl_dataset_t *ds)
660 {
661 int result;
662
663 if (ds == NULL) {
664 result = 3; /* "mos" */
665 } else {
666 result = dsl_dir_namelen(ds->ds_dir);
667 VERIFY(0 == dsl_dataset_get_snapname(ds));
668 if (ds->ds_snapname[0]) {
669 ++result; /* adding one for the @-sign */
670 if (!MUTEX_HELD(&ds->ds_lock)) {
671 mutex_enter(&ds->ds_lock);
672 result += strlen(ds->ds_snapname);
673 mutex_exit(&ds->ds_lock);
674 } else {
675 result += strlen(ds->ds_snapname);
676 }
677 }
678 }
679
680 return (result);
681 }
682
683 void
684 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
685 {
686 dmu_buf_rele(ds->ds_dbuf, tag);
687 }
688
689 void
690 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
691 {
692 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
693 rw_exit(&ds->ds_rwlock);
694 }
695 dsl_dataset_drop_ref(ds, tag);
696 }
697
698 void
699 dsl_dataset_disown(dsl_dataset_t *ds, void *owner)
700 {
701 ASSERT((ds->ds_owner == owner && ds->ds_dbuf) ||
702 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
703
704 mutex_enter(&ds->ds_lock);
705 ds->ds_owner = NULL;
706 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
707 rw_exit(&ds->ds_rwlock);
708 cv_broadcast(&ds->ds_exclusive_cv);
709 }
710 mutex_exit(&ds->ds_lock);
711 if (ds->ds_dbuf)
712 dsl_dataset_drop_ref(ds, owner);
713 else
714 dsl_dataset_evict(ds->ds_dbuf, ds);
715 }
716
717 boolean_t
718 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner)
719 {
720 boolean_t gotit = FALSE;
721
722 mutex_enter(&ds->ds_lock);
723 if (ds->ds_owner == NULL &&
724 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
725 ds->ds_owner = owner;
726 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
727 rw_exit(&ds->ds_rwlock);
728 gotit = TRUE;
729 }
730 mutex_exit(&ds->ds_lock);
731 return (gotit);
732 }
733
734 void
735 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
736 {
737 ASSERT3P(owner, ==, ds->ds_owner);
738 if (!RW_WRITE_HELD(&ds->ds_rwlock))
739 rw_enter(&ds->ds_rwlock, RW_WRITER);
740 }
741
742 uint64_t
743 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
744 uint64_t flags, dmu_tx_t *tx)
745 {
746 dsl_pool_t *dp = dd->dd_pool;
747 dmu_buf_t *dbuf;
748 dsl_dataset_phys_t *dsphys;
749 uint64_t dsobj;
750 objset_t *mos = dp->dp_meta_objset;
751
752 if (origin == NULL)
753 origin = dp->dp_origin_snap;
754
755 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
756 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
757 ASSERT(dmu_tx_is_syncing(tx));
758 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
759
760 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
761 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
762 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
763 dmu_buf_will_dirty(dbuf, tx);
764 dsphys = dbuf->db_data;
765 bzero(dsphys, sizeof (dsl_dataset_phys_t));
766 dsphys->ds_dir_obj = dd->dd_object;
767 dsphys->ds_flags = flags;
768 dsphys->ds_fsid_guid = unique_create();
769 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
770 sizeof (dsphys->ds_guid));
771 dsphys->ds_snapnames_zapobj =
772 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
773 DMU_OT_NONE, 0, tx);
774 dsphys->ds_creation_time = gethrestime_sec();
775 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
776 dsphys->ds_deadlist_obj =
777 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
778
779 if (origin) {
780 dsphys->ds_prev_snap_obj = origin->ds_object;
781 dsphys->ds_prev_snap_txg =
782 origin->ds_phys->ds_creation_txg;
783 dsphys->ds_used_bytes =
784 origin->ds_phys->ds_used_bytes;
785 dsphys->ds_compressed_bytes =
786 origin->ds_phys->ds_compressed_bytes;
787 dsphys->ds_uncompressed_bytes =
788 origin->ds_phys->ds_uncompressed_bytes;
789 dsphys->ds_bp = origin->ds_phys->ds_bp;
790 dsphys->ds_flags |= origin->ds_phys->ds_flags;
791
792 dmu_buf_will_dirty(origin->ds_dbuf, tx);
793 origin->ds_phys->ds_num_children++;
794
795 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
796 if (origin->ds_phys->ds_next_clones_obj == 0) {
797 origin->ds_phys->ds_next_clones_obj =
798 zap_create(mos,
799 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
800 }
801 VERIFY(0 == zap_add_int(mos,
802 origin->ds_phys->ds_next_clones_obj,
803 dsobj, tx));
804 }
805
806 dmu_buf_will_dirty(dd->dd_dbuf, tx);
807 dd->dd_phys->dd_origin_obj = origin->ds_object;
808 }
809
810 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
811 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
812
813 dmu_buf_rele(dbuf, FTAG);
814
815 dmu_buf_will_dirty(dd->dd_dbuf, tx);
816 dd->dd_phys->dd_head_dataset_obj = dsobj;
817
818 return (dsobj);
819 }
820
821 uint64_t
822 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
823 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
824 {
825 dsl_pool_t *dp = pdd->dd_pool;
826 uint64_t dsobj, ddobj;
827 dsl_dir_t *dd;
828
829 ASSERT(lastname[0] != '@');
830
831 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
832 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
833
834 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
835
836 dsl_deleg_set_create_perms(dd, tx, cr);
837
838 dsl_dir_close(dd, FTAG);
839
840 return (dsobj);
841 }
842
843 struct destroyarg {
844 dsl_sync_task_group_t *dstg;
845 char *snapname;
846 char *failed;
847 };
848
849 static int
850 dsl_snapshot_destroy_one(char *name, void *arg)
851 {
852 struct destroyarg *da = arg;
853 dsl_dataset_t *ds;
854 char *cp;
855 int err;
856
857 (void) strcat(name, "@");
858 (void) strcat(name, da->snapname);
859 err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT,
860 da->dstg, &ds);
861 cp = strchr(name, '@');
862 *cp = '\0';
863 if (err == 0) {
864 dsl_dataset_make_exclusive(ds, da->dstg);
865 if (ds->ds_user_ptr) {
866 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
867 ds->ds_user_ptr = NULL;
868 }
869 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
870 dsl_dataset_destroy_sync, ds, da->dstg, 0);
871 } else if (err == ENOENT) {
872 err = 0;
873 } else {
874 (void) strcpy(da->failed, name);
875 }
876 return (err);
877 }
878
879 /*
880 * Destroy 'snapname' in all descendants of 'fsname'.
881 */
882 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
883 int
884 dsl_snapshots_destroy(char *fsname, char *snapname)
885 {
886 int err;
887 struct destroyarg da;
888 dsl_sync_task_t *dst;
889 spa_t *spa;
890
891 err = spa_open(fsname, &spa, FTAG);
892 if (err)
893 return (err);
894 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
895 da.snapname = snapname;
896 da.failed = fsname;
897
898 err = dmu_objset_find(fsname,
899 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
900
901 if (err == 0)
902 err = dsl_sync_task_group_wait(da.dstg);
903
904 for (dst = list_head(&da.dstg->dstg_tasks); dst;
905 dst = list_next(&da.dstg->dstg_tasks, dst)) {
906 dsl_dataset_t *ds = dst->dst_arg1;
907 /*
908 * Return the file system name that triggered the error
909 */
910 if (dst->dst_err) {
911 dsl_dataset_name(ds, fsname);
912 *strchr(fsname, '@') = '\0';
913 }
914 dsl_dataset_disown(ds, da.dstg);
915 }
916
917 dsl_sync_task_group_destroy(da.dstg);
918 spa_close(spa, FTAG);
919 return (err);
920 }
921
922 /*
923 * ds must be opened as OWNER. On return (whether successful or not),
924 * ds will be closed and caller can no longer dereference it.
925 */
926 int
927 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag)
928 {
929 int err;
930 dsl_sync_task_group_t *dstg;
931 objset_t *os;
932 dsl_dir_t *dd;
933 uint64_t obj;
934
935 if (dsl_dataset_is_snapshot(ds)) {
936 /* Destroying a snapshot is simpler */
937 dsl_dataset_make_exclusive(ds, tag);
938
939 if (ds->ds_user_ptr) {
940 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
941 ds->ds_user_ptr = NULL;
942 }
943 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
944 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
945 ds, tag, 0);
946 goto out;
947 }
948
949 dd = ds->ds_dir;
950
951 /*
952 * Check for errors and mark this ds as inconsistent, in
953 * case we crash while freeing the objects.
954 */
955 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
956 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
957 if (err)
958 goto out;
959
960 err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os);
961 if (err)
962 goto out;
963
964 /*
965 * remove the objects in open context, so that we won't
966 * have too much to do in syncing context.
967 */
968 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
969 ds->ds_phys->ds_prev_snap_txg)) {
970 /*
971 * Ignore errors, if there is not enough disk space
972 * we will deal with it in dsl_dataset_destroy_sync().
973 */
974 (void) dmu_free_object(os, obj);
975 }
976
977 dmu_objset_close(os);
978 if (err != ESRCH)
979 goto out;
980
981 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
982 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
983 rw_exit(&dd->dd_pool->dp_config_rwlock);
984
985 if (err)
986 goto out;
987
988 if (ds->ds_user_ptr) {
989 /*
990 * We need to sync out all in-flight IO before we try
991 * to evict (the dataset evict func is trying to clear
992 * the cached entries for this dataset in the ARC).
993 */
994 txg_wait_synced(dd->dd_pool, 0);
995 }
996
997 /*
998 * Blow away the dsl_dir + head dataset.
999 */
1000 dsl_dataset_make_exclusive(ds, tag);
1001 if (ds->ds_user_ptr) {
1002 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
1003 ds->ds_user_ptr = NULL;
1004 }
1005 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1006 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1007 dsl_dataset_destroy_sync, ds, tag, 0);
1008 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1009 dsl_dir_destroy_sync, dd, FTAG, 0);
1010 err = dsl_sync_task_group_wait(dstg);
1011 dsl_sync_task_group_destroy(dstg);
1012 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1013 if (err)
1014 dsl_dir_close(dd, FTAG);
1015 out:
1016 dsl_dataset_disown(ds, tag);
1017 return (err);
1018 }
1019
1020 int
1021 dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost)
1022 {
1023 int err;
1024
1025 ASSERT(ds->ds_owner);
1026
1027 dsl_dataset_make_exclusive(ds, ds->ds_owner);
1028 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1029 dsl_dataset_rollback_check, dsl_dataset_rollback_sync,
1030 ds, &ost, 0);
1031 /* drop exclusive access */
1032 mutex_enter(&ds->ds_lock);
1033 rw_exit(&ds->ds_rwlock);
1034 cv_broadcast(&ds->ds_exclusive_cv);
1035 mutex_exit(&ds->ds_lock);
1036 return (err);
1037 }
1038
1039 void *
1040 dsl_dataset_set_user_ptr(dsl_dataset_t *ds,
1041 void *p, dsl_dataset_evict_func_t func)
1042 {
1043 void *old;
1044
1045 mutex_enter(&ds->ds_lock);
1046 old = ds->ds_user_ptr;
1047 if (old == NULL) {
1048 ds->ds_user_ptr = p;
1049 ds->ds_user_evict_func = func;
1050 }
1051 mutex_exit(&ds->ds_lock);
1052 return (old);
1053 }
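/*
 * Illustrative use of the set-if-unset semantics above (names are
 * hypothetical): a caller installing its objset pointer checks the
 * return value to detect losing the race:
 *
 *	if (dsl_dataset_set_user_ptr(ds, osi, my_evict_func) != NULL) {
 *		... another thread already installed a pointer; discard
 *		    osi and use dsl_dataset_get_user_ptr(ds) instead ...
 *	}
 */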
1054
1055 void *
1056 dsl_dataset_get_user_ptr(dsl_dataset_t *ds)
1057 {
1058 return (ds->ds_user_ptr);
1059 }
1060
1061
1062 blkptr_t *
1063 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1064 {
1065 return (&ds->ds_phys->ds_bp);
1066 }
1067
1068 void
1069 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1070 {
1071 ASSERT(dmu_tx_is_syncing(tx));
1072 /* If it's the meta-objset, set dp_meta_rootbp */
1073 if (ds == NULL) {
1074 tx->tx_pool->dp_meta_rootbp = *bp;
1075 } else {
1076 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1077 ds->ds_phys->ds_bp = *bp;
1078 }
1079 }
1080
1081 spa_t *
1082 dsl_dataset_get_spa(dsl_dataset_t *ds)
1083 {
1084 return (ds->ds_dir->dd_pool->dp_spa);
1085 }
1086
1087 void
1088 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1089 {
1090 dsl_pool_t *dp;
1091
1092 if (ds == NULL) /* this is the meta-objset */
1093 return;
1094
1095 ASSERT(ds->ds_user_ptr != NULL);
1096
1097 if (ds->ds_phys->ds_next_snap_obj != 0)
1098 panic("dirtying snapshot!");
1099
1100 dp = ds->ds_dir->dd_pool;
1101
1102 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1103 /* up the hold count until we can be written out */
1104 dmu_buf_add_ref(ds->ds_dbuf, ds);
1105 }
1106 }
1107
1108 /*
1109  * The unique space in the head dataset can be calculated by subtracting
1110  * the portion of the most recent snapshot's space that is still in use
1111  * by this file system from the space currently in use. To figure out
1112 * the space in the most recent snapshot still in use, we need to take
1113 * the total space used in the snapshot and subtract out the space that
1114 * has been freed up since the snapshot was taken.
1115 */
1116 static void
1117 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1118 {
1119 uint64_t mrs_used;
1120 uint64_t dlused, dlcomp, dluncomp;
1121
1122 ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);
1123
1124 if (ds->ds_phys->ds_prev_snap_obj != 0)
1125 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1126 else
1127 mrs_used = 0;
1128
1129 VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
1130 &dluncomp));
1131
1132 ASSERT3U(dlused, <=, mrs_used);
1133 ds->ds_phys->ds_unique_bytes =
1134 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1135
1136 if (!DS_UNIQUE_IS_ACCURATE(ds) &&
1137 spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1138 SPA_VERSION_UNIQUE_ACCURATE)
1139 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1140 }
1141
1142 static uint64_t
1143 dsl_dataset_unique(dsl_dataset_t *ds)
1144 {
1145 if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
1146 dsl_dataset_recalc_head_uniq(ds);
1147
1148 return (ds->ds_phys->ds_unique_bytes);
1149 }
1150
1151 struct killarg {
1152 dsl_dataset_t *ds;
1153 zio_t *zio;
1154 dmu_tx_t *tx;
1155 };
1156
1157 /* ARGSUSED */
1158 static int
1159 kill_blkptr(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
1160 const dnode_phys_t *dnp, void *arg)
1161 {
1162 struct killarg *ka = arg;
1163
1164 if (bp == NULL)
1165 return (0);
1166
1167 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1168 (void) dsl_dataset_block_kill(ka->ds, bp, ka->zio, ka->tx);
1169
1170 return (0);
1171 }
1172
1173 /* ARGSUSED */
1174 static int
1175 dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
1176 {
1177 dsl_dataset_t *ds = arg1;
1178 dmu_objset_type_t *ost = arg2;
1179
1180 /*
1181  * We can only roll back to emptiness if it is a ZPL objset.
1182 */
1183 if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
1184 return (EINVAL);
1185
1186 /*
1187 * This must not be a snapshot.
1188 */
1189 if (ds->ds_phys->ds_next_snap_obj != 0)
1190 return (EINVAL);
1191
1192 /*
1193 * If we made changes this txg, traverse_dataset won't find
1194 * them. Try again.
1195 */
1196 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1197 return (EAGAIN);
1198
1199 return (0);
1200 }
1201
1202 /* ARGSUSED */
1203 static void
1204 dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1205 {
1206 dsl_dataset_t *ds = arg1;
1207 dmu_objset_type_t *ost = arg2;
1208 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1209
1210 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1211
1212 /*
1213 * Before the roll back destroy the zil.
1214 */
1215 if (ds->ds_user_ptr != NULL) {
1216 zil_rollback_destroy(
1217 ((objset_impl_t *)ds->ds_user_ptr)->os_zil, tx);
1218
1219 /*
1220 * We need to make sure that the objset_impl_t is reopened after
1221 * we do the rollback, otherwise it will have the wrong
1222 * objset_phys_t. Normally this would happen when this
1223 * dataset-open is closed, thus causing the
1224 * dataset to be immediately evicted. But when doing "zfs recv
1225 * -F", we reopen the objset before that, so that there is no
1226 * window where the dataset is closed and inconsistent.
1227 */
1228 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
1229 ds->ds_user_ptr = NULL;
1230 }
1231
1232 /* Transfer space that was freed since last snap back to the head. */
1233 {
1234 uint64_t used;
1235
1236 VERIFY(0 == bplist_space_birthrange(&ds->ds_deadlist,
1237 ds->ds_origin_txg, UINT64_MAX, &used));
1238 dsl_dir_transfer_space(ds->ds_dir, used,
1239 DD_USED_SNAP, DD_USED_HEAD, tx);
1240 }
1241
1242 /* Zero out the deadlist. */
1243 bplist_close(&ds->ds_deadlist);
1244 bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1245 ds->ds_phys->ds_deadlist_obj =
1246 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1247 VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1248 ds->ds_phys->ds_deadlist_obj));
1249
1250 {
1251 /* Free blkptrs that we gave birth to */
1252 zio_t *zio;
1253 struct killarg ka;
1254
1255 zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL,
1256 ZIO_FLAG_MUSTSUCCEED);
1257 ka.ds = ds;
1258 ka.zio = zio;
1259 ka.tx = tx;
1260 (void) traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1261 TRAVERSE_POST, kill_blkptr, &ka);
1262 (void) zio_wait(zio);
1263 }
1264
1265 ASSERT(!(ds->ds_phys->ds_flags & DS_FLAG_UNIQUE_ACCURATE) ||
1266 ds->ds_phys->ds_unique_bytes == 0);
1267
1268 if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) {
1269 /* Change our contents to that of the prev snapshot */
1270
1271 ASSERT3U(ds->ds_prev->ds_object, ==,
1272 ds->ds_phys->ds_prev_snap_obj);
1273 ASSERT3U(ds->ds_phys->ds_used_bytes, <=,
1274 ds->ds_prev->ds_phys->ds_used_bytes);
1275
1276 ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp;
1277 ds->ds_phys->ds_used_bytes =
1278 ds->ds_prev->ds_phys->ds_used_bytes;
1279 ds->ds_phys->ds_compressed_bytes =
1280 ds->ds_prev->ds_phys->ds_compressed_bytes;
1281 ds->ds_phys->ds_uncompressed_bytes =
1282 ds->ds_prev->ds_phys->ds_uncompressed_bytes;
1283 ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags;
1284
1285 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1286 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1287 ds->ds_prev->ds_phys->ds_unique_bytes = 0;
1288 }
1289 } else {
1290 objset_impl_t *osi;
1291
1292 ASSERT3U(ds->ds_phys->ds_used_bytes, ==, 0);
1293 ASSERT3U(ds->ds_phys->ds_compressed_bytes, ==, 0);
1294 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, ==, 0);
1295
1296 bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t));
1297 ds->ds_phys->ds_flags = 0;
1298 ds->ds_phys->ds_unique_bytes = 0;
1299 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1300 SPA_VERSION_UNIQUE_ACCURATE)
1301 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1302
1303 osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds,
1304 &ds->ds_phys->ds_bp, *ost, tx);
1305 #ifdef _KERNEL
1306 zfs_create_fs(&osi->os, kcred, NULL, tx);
1307 #endif
1308 }
1309
1310 spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa,
1311 tx, cr, "dataset = %llu", ds->ds_object);
1312 }
1313
1314 /* ARGSUSED */
1315 static int
1316 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1317 {
1318 dsl_dataset_t *ds = arg1;
1319 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1320 uint64_t count;
1321 int err;
1322
1323 /*
1324 * Can't delete a head dataset if there are snapshots of it.
1325 * (Except if the only snapshots are from the branch we cloned
1326 * from.)
1327 */
1328 if (ds->ds_prev != NULL &&
1329 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1330 return (EINVAL);
1331
1332 /*
1333 * This is really a dsl_dir thing, but check it here so that
1334 * we'll be less likely to leave this dataset inconsistent &
1335 * nearly destroyed.
1336 */
1337 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1338 if (err)
1339 return (err);
1340 if (count != 0)
1341 return (EEXIST);
1342
1343 return (0);
1344 }
1345
1346 /* ARGSUSED */
1347 static void
1348 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1349 {
1350 dsl_dataset_t *ds = arg1;
1351 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1352
1353 /* Mark it as inconsistent on-disk, in case we crash */
1354 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1355 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1356
1357 spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1358 cr, "dataset = %llu", ds->ds_object);
1359 }
1360
1361 /* ARGSUSED */
1362 int
1363 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1364 {
1365 dsl_dataset_t *ds = arg1;
1366
1367  /* we have an owner hold, so no one else can destroy us */
1368 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1369
1370 /* Can't delete a branch point. */
1371 if (ds->ds_phys->ds_num_children > 1)
1372 return (EEXIST);
1373
1374 /*
1375 * Can't delete a head dataset if there are snapshots of it.
1376 * (Except if the only snapshots are from the branch we cloned
1377 * from.)
1378 */
1379 if (ds->ds_prev != NULL &&
1380 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1381 return (EINVAL);
1382
1383 /*
1384  * If we made changes this txg, traverse_dataset won't find
1385 * them. Try again.
1386 */
1387 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1388 return (EAGAIN);
1389
1390 /* XXX we should do some i/o error checking... */
1391 return (0);
1392 }
1393
1394 struct refsarg {
1395 kmutex_t lock;
1396 boolean_t gone;
1397 kcondvar_t cv;
1398 };
1399
1400 /* ARGSUSED */
1401 static void
1402 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1403 {
1404 struct refsarg *arg = argv;
1405
1406 mutex_enter(&arg->lock);
1407 arg->gone = TRUE;
1408 cv_signal(&arg->cv);
1409 mutex_exit(&arg->lock);
1410 }
1411
1412 static void
1413 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1414 {
1415 struct refsarg arg;
1416
1417 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1418 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1419 arg.gone = FALSE;
1420 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1421 dsl_dataset_refs_gone);
1422 dmu_buf_rele(ds->ds_dbuf, tag);
1423 mutex_enter(&arg.lock);
1424 while (!arg.gone)
1425 cv_wait(&arg.cv, &arg.lock);
1426 ASSERT(arg.gone);
1427 mutex_exit(&arg.lock);
1428 ds->ds_dbuf = NULL;
1429 ds->ds_phys = NULL;
1430 mutex_destroy(&arg.lock);
1431 cv_destroy(&arg.cv);
1432 }
1433
1434 void
1435 dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
1436 {
1437 dsl_dataset_t *ds = arg1;
1438 zio_t *zio;
1439 int err;
1440 int after_branch_point = FALSE;
1441 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1442 objset_t *mos = dp->dp_meta_objset;
1443 dsl_dataset_t *ds_prev = NULL;
1444 uint64_t obj;
1445
1446 ASSERT(ds->ds_owner);
1447 ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
1448 ASSERT(ds->ds_prev == NULL ||
1449 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1450 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1451
1452 /* signal any waiters that this dataset is going away */
1453 mutex_enter(&ds->ds_lock);
1454 ds->ds_owner = dsl_reaper;
1455 cv_broadcast(&ds->ds_exclusive_cv);
1456 mutex_exit(&ds->ds_lock);
1457
1458 /* Remove our reservation */
1459 if (ds->ds_reserved != 0) {
1460 uint64_t val = 0;
1461 dsl_dataset_set_reservation_sync(ds, &val, cr, tx);
1462 ASSERT3U(ds->ds_reserved, ==, 0);
1463 }
1464
1465 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1466
1467 dsl_pool_ds_destroyed(ds, tx);
1468
1469 obj = ds->ds_object;
1470
1471 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1472 if (ds->ds_prev) {
1473 ds_prev = ds->ds_prev;
1474 } else {
1475 VERIFY(0 == dsl_dataset_hold_obj(dp,
1476 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1477 }
1478 after_branch_point =
1479 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1480
1481 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1482 if (after_branch_point &&
1483 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1484 VERIFY(0 == zap_remove_int(mos,
1485 ds_prev->ds_phys->ds_next_clones_obj, obj, tx));
1486 if (ds->ds_phys->ds_next_snap_obj != 0) {
1487 VERIFY(0 == zap_add_int(mos,
1488 ds_prev->ds_phys->ds_next_clones_obj,
1489 ds->ds_phys->ds_next_snap_obj, tx));
1490 }
1491 }
1492 if (after_branch_point &&
1493 ds->ds_phys->ds_next_snap_obj == 0) {
1494 /* This clone is toast. */
1495 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1496 ds_prev->ds_phys->ds_num_children--;
1497 } else if (!after_branch_point) {
1498 ds_prev->ds_phys->ds_next_snap_obj =
1499 ds->ds_phys->ds_next_snap_obj;
1500 }
1501 }
1502
1503 zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1504
1505 if (ds->ds_phys->ds_next_snap_obj != 0) {
1506 blkptr_t bp;
1507 dsl_dataset_t *ds_next;
1508 uint64_t itor = 0;
1509 uint64_t old_unique;
1510 int64_t used = 0, compressed = 0, uncompressed = 0;
1511
1512 VERIFY(0 == dsl_dataset_hold_obj(dp,
1513 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1514 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1515
1516 old_unique = dsl_dataset_unique(ds_next);
1517
1518 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1519 ds_next->ds_phys->ds_prev_snap_obj =
1520 ds->ds_phys->ds_prev_snap_obj;
1521 ds_next->ds_phys->ds_prev_snap_txg =
1522 ds->ds_phys->ds_prev_snap_txg;
1523 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1524 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1525
1526 /*
1527 * Transfer to our deadlist (which will become next's
1528 * new deadlist) any entries from next's current
1529 * deadlist which were born before prev, and free the
1530 * other entries.
1531 *
1532 * XXX we're doing this long task with the config lock held
1533 */
1534 while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1535 if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1536 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1537 &bp, tx));
1538 if (ds_prev && !after_branch_point &&
1539 bp.blk_birth >
1540 ds_prev->ds_phys->ds_prev_snap_txg) {
1541 ds_prev->ds_phys->ds_unique_bytes +=
1542 bp_get_dasize(dp->dp_spa, &bp);
1543 }
1544 } else {
1545 used += bp_get_dasize(dp->dp_spa, &bp);
1546 compressed += BP_GET_PSIZE(&bp);
1547 uncompressed += BP_GET_UCSIZE(&bp);
1548 /* XXX check return value? */
1549 (void) dsl_free(zio, dp, tx->tx_txg,
1550 &bp, NULL, NULL, ARC_NOWAIT);
1551 }
1552 }
1553
1554 ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1555
1556 /* change snapused */
1557 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1558 -used, -compressed, -uncompressed, tx);
1559
1560 /* free next's deadlist */
1561 bplist_close(&ds_next->ds_deadlist);
1562 bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1563
1564 /* set next's deadlist to our deadlist */
1565 bplist_close(&ds->ds_deadlist);
1566 ds_next->ds_phys->ds_deadlist_obj =
1567 ds->ds_phys->ds_deadlist_obj;
1568 VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1569 ds_next->ds_phys->ds_deadlist_obj));
1570 ds->ds_phys->ds_deadlist_obj = 0;
1571
1572 if (ds_next->ds_phys->ds_next_snap_obj != 0) {
1573 /*
1574 * Update next's unique to include blocks which
1575 * were previously shared by only this snapshot
1576 * and it. Those blocks will be born after the
1577 * prev snap and before this snap, and will have
1578 * died after the next snap and before the one
1579  * after that (i.e., be on the snap after next's
1580 * deadlist).
1581 *
1582 * XXX we're doing this long task with the
1583 * config lock held
1584 */
1585 dsl_dataset_t *ds_after_next;
1586 uint64_t space;
1587
1588 VERIFY(0 == dsl_dataset_hold_obj(dp,
1589 ds_next->ds_phys->ds_next_snap_obj,
1590 FTAG, &ds_after_next));
1591
1592 VERIFY(0 ==
1593 bplist_space_birthrange(&ds_after_next->ds_deadlist,
1594 ds->ds_phys->ds_prev_snap_txg,
1595 ds->ds_phys->ds_creation_txg, &space));
1596 ds_next->ds_phys->ds_unique_bytes += space;
1597
1598 dsl_dataset_rele(ds_after_next, FTAG);
1599 ASSERT3P(ds_next->ds_prev, ==, NULL);
1600 } else {
1601 ASSERT3P(ds_next->ds_prev, ==, ds);
1602 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1603 ds_next->ds_prev = NULL;
1604 if (ds_prev) {
1605 VERIFY(0 == dsl_dataset_get_ref(dp,
1606 ds->ds_phys->ds_prev_snap_obj,
1607 ds_next, &ds_next->ds_prev));
1608 }
1609
1610 dsl_dataset_recalc_head_uniq(ds_next);
1611
1612 /*
1613  * Reduce the amount of our unconsumed refreservation
1614 * being charged to our parent by the amount of
1615 * new unique data we have gained.
1616 */
1617 if (old_unique < ds_next->ds_reserved) {
1618 int64_t mrsdelta;
1619 uint64_t new_unique =
1620 ds_next->ds_phys->ds_unique_bytes;
1621
1622 ASSERT(old_unique <= new_unique);
1623 mrsdelta = MIN(new_unique - old_unique,
1624 ds_next->ds_reserved - old_unique);
1625 dsl_dir_diduse_space(ds->ds_dir,
1626 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1627 }
1628 }
1629 dsl_dataset_rele(ds_next, FTAG);
1630 } else {
1631 /*
1632 * There's no next snapshot, so this is a head dataset.
1633 * Destroy the deadlist. Unless it's a clone, the
1634 * deadlist should be empty. (If it's a clone, it's
1635 * safe to ignore the deadlist contents.)
1636 */
1637 struct killarg ka;
1638
1639 ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1640 bplist_close(&ds->ds_deadlist);
1641 bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1642 ds->ds_phys->ds_deadlist_obj = 0;
1643
1644 /*
1645 * Free everything that we point to (that's born after
1646 * the previous snapshot, if we are a clone)
1647 *
1648 * NB: this should be very quick, because we already
1649 * freed all the objects in open context.
1650 */
1651 ka.ds = ds;
1652 ka.zio = zio;
1653 ka.tx = tx;
1654 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1655 TRAVERSE_POST, kill_blkptr, &ka);
1656 ASSERT3U(err, ==, 0);
1657 ASSERT(spa_version(dp->dp_spa) < SPA_VERSION_UNIQUE_ACCURATE ||
1658 ds->ds_phys->ds_unique_bytes == 0);
1659 }
1660
1661 err = zio_wait(zio);
1662 ASSERT3U(err, ==, 0);
1663
1664 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1665 /* Erase the link in the dir */
1666 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1667 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1668 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1669 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1670 ASSERT(err == 0);
1671 } else {
1672 /* remove from snapshot namespace */
1673 dsl_dataset_t *ds_head;
1674 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1675 VERIFY(0 == dsl_dataset_hold_obj(dp,
1676 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1677 VERIFY(0 == dsl_dataset_get_snapname(ds));
1678 #ifdef ZFS_DEBUG
1679 {
1680 uint64_t val;
1681
1682 err = dsl_dataset_snap_lookup(ds_head,
1683 ds->ds_snapname, &val);
1684 ASSERT3U(err, ==, 0);
1685 ASSERT3U(val, ==, obj);
1686 }
1687 #endif
1688 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1689 ASSERT(err == 0);
1690 dsl_dataset_rele(ds_head, FTAG);
1691 }
1692
1693 if (ds_prev && ds->ds_prev != ds_prev)
1694 dsl_dataset_rele(ds_prev, FTAG);
1695
1696 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1697 spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
1698 cr, "dataset = %llu", ds->ds_object);
1699
1700 if (ds->ds_phys->ds_next_clones_obj != 0) {
1701 uint64_t count;
1702 ASSERT(0 == zap_count(mos,
1703 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1704 VERIFY(0 == dmu_object_free(mos,
1705 ds->ds_phys->ds_next_clones_obj, tx));
1706 }
1707 if (ds->ds_phys->ds_props_obj != 0)
1708 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1709 dsl_dir_close(ds->ds_dir, ds);
1710 ds->ds_dir = NULL;
1711 dsl_dataset_drain_refs(ds, tag);
1712 VERIFY(0 == dmu_object_free(mos, obj, tx));
1713 }
1714
1715 static int
1716 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1717 {
1718 uint64_t asize;
1719
1720 if (!dmu_tx_is_syncing(tx))
1721 return (0);
1722
1723 /*
1724 * If there's an fs-only reservation, any blocks that might become
1725 * owned by the snapshot dataset must be accommodated by space
1726 * outside of the reservation.
1727 */
1728 asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1729 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
1730 return (ENOSPC);
1731
1732 /*
1733  * Propagate any reserved space for this snapshot to other
1734 * snapshot checks in this sync group.
1735 */
1736 if (asize > 0)
1737 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1738
1739 return (0);
1740 }
1741
1742 /* ARGSUSED */
1743 int
1744 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1745 {
1746 dsl_dataset_t *ds = arg1;
1747 const char *snapname = arg2;
1748 int err;
1749 uint64_t value;
1750
1751 /*
1752 * We don't allow multiple snapshots of the same txg. If there
1753 * is already one, try again.
1754 */
1755 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1756 return (EAGAIN);
1757
1758 /*
1759  * Check for a conflicting snapshot name.
1760 */
1761 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1762 if (err == 0)
1763 return (EEXIST);
1764 if (err != ENOENT)
1765 return (err);
1766
1767 /*
1768  * Check that the dataset's name is not too long. The name consists
1769  * of the dataset name's length + 1 for the @-sign + the snapshot name's length.
1770 */
1771 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1772 return (ENAMETOOLONG);
1773
1774 err = dsl_dataset_snapshot_reserve_space(ds, tx);
1775 if (err)
1776 return (err);
1777
1778 ds->ds_trysnap_txg = tx->tx_txg;
1779 return (0);
1780 }
1781
1782 void
1783 dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1784 {
1785 dsl_dataset_t *ds = arg1;
1786 const char *snapname = arg2;
1787 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1788 dmu_buf_t *dbuf;
1789 dsl_dataset_phys_t *dsphys;
1790 uint64_t dsobj, crtxg;
1791 objset_t *mos = dp->dp_meta_objset;
1792 int err;
1793
1794 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1795
1796 /*
1797 * The origin's ds_creation_txg has to be < TXG_INITIAL
1798 */
1799 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
1800 crtxg = 1;
1801 else
1802 crtxg = tx->tx_txg;
1803
1804 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1805 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1806 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1807 dmu_buf_will_dirty(dbuf, tx);
1808 dsphys = dbuf->db_data;
1809 bzero(dsphys, sizeof (dsl_dataset_phys_t));
1810 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
1811 dsphys->ds_fsid_guid = unique_create();
1812 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1813 sizeof (dsphys->ds_guid));
1814 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
1815 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
1816 dsphys->ds_next_snap_obj = ds->ds_object;
1817 dsphys->ds_num_children = 1;
1818 dsphys->ds_creation_time = gethrestime_sec();
1819 dsphys->ds_creation_txg = crtxg;
1820 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
1821 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
1822 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
1823 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
1824 dsphys->ds_flags = ds->ds_phys->ds_flags;
1825 dsphys->ds_bp = ds->ds_phys->ds_bp;
1826 dmu_buf_rele(dbuf, FTAG);
1827
1828 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
1829 if (ds->ds_prev) {
1830 uint64_t next_clones_obj =
1831 ds->ds_prev->ds_phys->ds_next_clones_obj;
1832 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
1833 ds->ds_object ||
1834 ds->ds_prev->ds_phys->ds_num_children > 1);
1835 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1836 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1837 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1838 ds->ds_prev->ds_phys->ds_creation_txg);
1839 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
1840 } else if (next_clones_obj != 0) {
1841 VERIFY3U(0, ==, zap_remove_int(mos,
1842 next_clones_obj, dsphys->ds_next_snap_obj, tx));
1843 VERIFY3U(0, ==, zap_add_int(mos,
1844 next_clones_obj, dsobj, tx));
1845 }
1846 }
1847
1848 /*
1849 * If we have a reference-reservation on this dataset, we will
1850 * need to increase the amount of refreservation being charged
1851 * since our unique space is going to zero.
1852 */
1853 if (ds->ds_reserved) {
1854 int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1855 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
1856 add, 0, 0, tx);
1857 }
1858
1859 bplist_close(&ds->ds_deadlist);
1860 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1861 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
1862 ds->ds_phys->ds_prev_snap_obj = dsobj;
1863 ds->ds_phys->ds_prev_snap_txg = crtxg;
1864 ds->ds_phys->ds_unique_bytes = 0;
1865 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1866 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1867 ds->ds_phys->ds_deadlist_obj =
1868 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1869 VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1870 ds->ds_phys->ds_deadlist_obj));
1871
1872 dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
1873 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
1874 snapname, 8, 1, &dsobj, tx);
1875 ASSERT(err == 0);
1876
1877 if (ds->ds_prev)
1878 dsl_dataset_drop_ref(ds->ds_prev, ds);
1879 VERIFY(0 == dsl_dataset_get_ref(dp,
1880 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
1881
1882 dsl_pool_ds_snapshotted(ds, tx);
1883
1884 spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
1885 "dataset = %llu", dsobj);
1886 }
1887
1888 void
1889 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
1890 {
1891 ASSERT(dmu_tx_is_syncing(tx));
1892 ASSERT(ds->ds_user_ptr != NULL);
1893 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
1894
1895 /*
1896 * in case we had to change ds_fsid_guid when we opened it,
1897 * sync it out now.
1898 */
1899 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1900 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
1901
1902 dsl_dir_dirty(ds->ds_dir, tx);
1903 dmu_objset_sync(ds->ds_user_ptr, zio, tx);
1904 }
1905
1906 void
1907 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
1908 {
1909 uint64_t refd, avail, uobjs, aobjs;
1910
1911 dsl_dir_stats(ds->ds_dir, nv);
1912
1913 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
1914 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
1915 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
1916
1917 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
1918 ds->ds_phys->ds_creation_time);
1919 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
1920 ds->ds_phys->ds_creation_txg);
1921 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
1922 ds->ds_quota);
1923 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
1924 ds->ds_reserved);
1925 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
1926 ds->ds_phys->ds_guid);
1927
1928 if (ds->ds_phys->ds_next_snap_obj) {
1929 /*
1930 * This is a snapshot; override the dd's space used with
1931 * our unique space and compression ratio.
1932 */
1933 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
1934 ds->ds_phys->ds_unique_bytes);
1935 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
1936 ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
1937 (ds->ds_phys->ds_uncompressed_bytes * 100 /
1938 ds->ds_phys->ds_compressed_bytes));
1939 }
1940 }
1941
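/*
 * Fill in the cheap-to-compute portion of the stats: creation txg,
 * inconsistent flag, guid, whether this is a snapshot (and if so, how
 * many clones hang off of it), and the name of the clone origin, if any.
 */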
1942 void
1943 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
1944 {
1945 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
1946 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
1947 stat->dds_guid = ds->ds_phys->ds_guid;
1948 if (ds->ds_phys->ds_next_snap_obj) {
1949 stat->dds_is_snapshot = B_TRUE;
1950 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
1951 } else {
1952 stat->dds_is_snapshot = B_FALSE;
1953 stat->dds_num_clones = 0;
1954 }
1955
1956 /* clone origin is really a dsl_dir thing... */
1957 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
1958 if (dsl_dir_is_clone(ds->ds_dir)) {
1959 dsl_dataset_t *ods;
1960
1961 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
1962 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
1963 dsl_dataset_name(ods, stat->dds_origin);
1964 dsl_dataset_drop_ref(ods, FTAG);
1965 } else {
1966 stat->dds_origin[0] = '\0';
1967 }
1968 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
1969 }
1970
1971 uint64_t
1972 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
1973 {
1974 return (ds->ds_fsid_guid);
1975 }
1976
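/*
 * Report this dataset's space usage: bytes referenced, bytes available
 * (adjusted for any unconsumed refreservation and capped by refquota),
 * objects used, and objects available.
 *
 * For illustration (hypothetical numbers): with a 10 GB refreservation
 * of which only 2 GB is unique to this dataset, the remaining 8 GB has
 * already been set aside for this dataset, so it is added back to the
 * available space reported to the caller.
 */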
1977 void
1978 dsl_dataset_space(dsl_dataset_t *ds,
1979 uint64_t *refdbytesp, uint64_t *availbytesp,
1980 uint64_t *usedobjsp, uint64_t *availobjsp)
1981 {
1982 *refdbytesp = ds->ds_phys->ds_used_bytes;
1983 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
1984 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
1985 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
1986 if (ds->ds_quota != 0) {
1987 /*
1988 * Adjust available bytes according to refquota
1989 */
1990 if (*refdbytesp < ds->ds_quota)
1991 *availbytesp = MIN(*availbytesp,
1992 ds->ds_quota - *refdbytesp);
1993 else
1994 *availbytesp = 0;
1995 }
1996 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
1997 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
1998 }
1999
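/*
 * Returns B_TRUE if this dataset has been written to since its most
 * recent snapshot (i.e. its current root block was born after the
 * previous snapshot's creation txg), B_FALSE otherwise.
 */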
2000 boolean_t
2001 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2002 {
2003 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2004
2005 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2006 dsl_pool_sync_context(dp));
2007 if (ds->ds_prev == NULL)
2008 return (B_FALSE);
2009 if (ds->ds_phys->ds_bp.blk_birth >
2010 ds->ds_prev->ds_phys->ds_creation_txg)
2011 return (B_TRUE);
2012 return (B_FALSE);
2013 }
2014
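/*
 * Sync task to rename a single snapshot.  The check func verifies that
 * the new name is not already in use and that the resulting full name
 * fits in MAXNAMELEN; the sync func moves the entry in the head
 * dataset's snapnames zap and updates the in-core ds_snapname.
 */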
2015 /* ARGSUSED */
2016 static int
2017 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2018 {
2019 dsl_dataset_t *ds = arg1;
2020 char *newsnapname = arg2;
2021 dsl_dir_t *dd = ds->ds_dir;
2022 dsl_dataset_t *hds;
2023 uint64_t val;
2024 int err;
2025
2026 err = dsl_dataset_hold_obj(dd->dd_pool,
2027 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2028 if (err)
2029 return (err);
2030
2031 /* new name better not be in use */
2032 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2033 dsl_dataset_rele(hds, FTAG);
2034
2035 if (err == 0)
2036 err = EEXIST;
2037 else if (err == ENOENT)
2038 err = 0;
2039
2040 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2041 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2042 err = ENAMETOOLONG;
2043
2044 return (err);
2045 }
2046
2047 static void
2048 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
2049 cred_t *cr, dmu_tx_t *tx)
2050 {
2051 dsl_dataset_t *ds = arg1;
2052 const char *newsnapname = arg2;
2053 dsl_dir_t *dd = ds->ds_dir;
2054 objset_t *mos = dd->dd_pool->dp_meta_objset;
2055 dsl_dataset_t *hds;
2056 int err;
2057
2058 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2059
2060 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2061 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2062
2063 VERIFY(0 == dsl_dataset_get_snapname(ds));
2064 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2065 ASSERT3U(err, ==, 0);
2066 mutex_enter(&ds->ds_lock);
2067 (void) strcpy(ds->ds_snapname, newsnapname);
2068 mutex_exit(&ds->ds_lock);
2069 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2070 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2071 ASSERT3U(err, ==, 0);
2072
2073 spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2074 cr, "dataset = %llu", ds->ds_object);
2075 dsl_dataset_rele(hds, FTAG);
2076 }
2077
2078 struct renamesnaparg {
2079 dsl_sync_task_group_t *dstg;
2080 char failed[MAXPATHLEN];
2081 char *oldsnap;
2082 char *newsnap;
2083 };
2084
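/*
 * dmu_objset_find() callback used by dsl_recursive_rename(): for each
 * descendant filesystem, check rename permissions, unmount the old
 * snapshot, and queue a rename sync task for it.  Missing snapshots
 * (ENOENT) are silently skipped.
 */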
2085 static int
2086 dsl_snapshot_rename_one(char *name, void *arg)
2087 {
2088 struct renamesnaparg *ra = arg;
2089 dsl_dataset_t *ds = NULL;
2090 char *cp;
2091 int err;
2092
2093 cp = name + strlen(name);
2094 *cp = '@';
2095 (void) strcpy(cp + 1, ra->oldsnap);
2096
2097 /*
2098 * For recursive snapshot renames the parent won't be changing
2099 * so we just pass name for both the to and from arguments.
2100 */
2101 err = zfs_secpolicy_rename_perms(name, name, CRED());
2102 if (err == ENOENT) {
2103 return (0);
2104 } else if (err) {
2105 (void) strcpy(ra->failed, name);
2106 return (err);
2107 }
2108
2109 #ifdef _KERNEL
2110 /*
2111 * For all filesystems undergoing rename, we'll need to unmount them.
2112 */
2113 (void) zfs_unmount_snap(name, NULL);
2114 #endif
2115 err = dsl_dataset_hold(name, ra->dstg, &ds);
2116 *cp = '\0';
2117 if (err == ENOENT) {
2118 return (0);
2119 } else if (err) {
2120 (void) strcpy(ra->failed, name);
2121 return (err);
2122 }
2123
2124 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2125 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2126
2127 return (0);
2128 }
2129
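/*
 * Rename a snapshot recursively: build a sync task group with one
 * rename task per descendant filesystem that has the snapshot, wait for
 * the group to complete, and on failure copy the name of the offending
 * snapshot back into 'oldname' for error reporting.
 */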
2130 static int
2131 dsl_recursive_rename(char *oldname, const char *newname)
2132 {
2133 int err;
2134 struct renamesnaparg *ra;
2135 dsl_sync_task_t *dst;
2136 spa_t *spa;
2137 char *cp, *fsname = spa_strdup(oldname);
2138 int len = strlen(oldname);
2139
2140 /* truncate the snapshot name to get the fsname */
2141 cp = strchr(fsname, '@');
2142 *cp = '\0';
2143
2144 err = spa_open(fsname, &spa, FTAG);
2145 if (err) {
2146 kmem_free(fsname, len + 1);
2147 return (err);
2148 }
2149 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2150 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2151
2152 ra->oldsnap = strchr(oldname, '@') + 1;
2153 ra->newsnap = strchr(newname, '@') + 1;
2154 *ra->failed = '\0';
2155
2156 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2157 DS_FIND_CHILDREN);
2158 kmem_free(fsname, len + 1);
2159
2160 if (err == 0) {
2161 err = dsl_sync_task_group_wait(ra->dstg);
2162 }
2163
2164 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2165 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2166 dsl_dataset_t *ds = dst->dst_arg1;
2167 if (dst->dst_err) {
2168 dsl_dir_name(ds->ds_dir, ra->failed);
2169 (void) strcat(ra->failed, "@");
2170 (void) strcat(ra->failed, ra->newsnap);
2171 }
2172 dsl_dataset_rele(ds, ra->dstg);
2173 }
2174
2175 if (err)
2176 (void) strcpy(oldname, ra->failed);
2177
2178 dsl_sync_task_group_destroy(ra->dstg);
2179 kmem_free(ra, sizeof (struct renamesnaparg));
2180 spa_close(spa, FTAG);
2181 return (err);
2182 }
2183
2184 static int
2185 dsl_valid_rename(char *oldname, void *arg)
2186 {
2187 int delta = *(int *)arg;
2188
2189 if (strlen(oldname) + delta >= MAXNAMELEN)
2190 return (ENAMETOOLONG);
2191
2192 return (0);
2193 }
2194
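/*
 * Rename a filesystem or snapshot.  Renaming a filesystem is handed off
 * to dsl_dir_rename() (after validating that no child name would exceed
 * MAXNAMELEN when the name grows).  Renaming a snapshot requires the new
 * name to be a snapshot of the same filesystem and is done either
 * directly or, if 'recursive' is set, across all descendant filesystems.
 */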
2195 #pragma weak dmu_objset_rename = dsl_dataset_rename
2196 int
2197 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2198 {
2199 dsl_dir_t *dd;
2200 dsl_dataset_t *ds;
2201 const char *tail;
2202 int err;
2203
2204 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2205 if (err)
2206 return (err);
2207 /*
2208 * If there are more than 2 references there may be holds
2209 * hanging around that haven't been cleared out yet.
2210 */
2211 if (dmu_buf_refcount(dd->dd_dbuf) > 2)
2212 txg_wait_synced(dd->dd_pool, 0);
2213 if (tail == NULL) {
2214 int delta = strlen(newname) - strlen(oldname);
2215
2216 /* if we're growing, validate child name lengths */
2217 if (delta > 0)
2218 err = dmu_objset_find(oldname, dsl_valid_rename,
2219 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2220
2221 if (!err)
2222 err = dsl_dir_rename(dd, newname);
2223 dsl_dir_close(dd, FTAG);
2224 return (err);
2225 }
2226 if (tail[0] != '@') {
2227 /* the name ended in a nonexistent component */
2228 dsl_dir_close(dd, FTAG);
2229 return (ENOENT);
2230 }
2231
2232 dsl_dir_close(dd, FTAG);
2233
2234 /* new name must be snapshot in same filesystem */
2235 tail = strchr(newname, '@');
2236 if (tail == NULL)
2237 return (EINVAL);
2238 tail++;
2239 if (strncmp(oldname, newname, tail - newname) != 0)
2240 return (EXDEV);
2241
2242 if (recursive) {
2243 err = dsl_recursive_rename(oldname, newname);
2244 } else {
2245 err = dsl_dataset_hold(oldname, FTAG, &ds);
2246 if (err)
2247 return (err);
2248
2249 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2250 dsl_dataset_snapshot_rename_check,
2251 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2252
2253 dsl_dataset_rele(ds, FTAG);
2254 }
2255
2256 return (err);
2257 }
2258
2259 struct promotenode {
2260 list_node_t link;
2261 dsl_dataset_t *ds;
2262 };
2263
2264 struct promotearg {
2265 list_t shared_snaps, origin_snaps, clone_snaps;
2266 dsl_dataset_t *origin_origin, *origin_head;
2267 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2268 };
2269
2270 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2271
2272 /* ARGSUSED */
2273 static int
2274 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2275 {
2276 dsl_dataset_t *hds = arg1;
2277 struct promotearg *pa = arg2;
2278 struct promotenode *snap = list_head(&pa->shared_snaps);
2279 dsl_dataset_t *origin_ds = snap->ds;
2280 int err;
2281
2282 /* Check that it is a real clone */
2283 if (!dsl_dir_is_clone(hds->ds_dir))
2284 return (EINVAL);
2285
2286 /* Since this is so expensive, don't do the preliminary check */
2287 if (!dmu_tx_is_syncing(tx))
2288 return (0);
2289
2290 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2291 return (EXDEV);
2292
2293 /* compute origin's new unique space */
2294 snap = list_tail(&pa->clone_snaps);
2295 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2296 err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2297 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
2298 if (err)
2299 return (err);
2300
2301 /*
2302 * Walk the snapshots that we are moving
2303 *
2304 * Compute space to transfer. Consider the incremental changes
2305 * to used for each snapshot:
2306 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2307 * So each snapshot gave birth to:
2308 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2309 * So a sequence would look like:
2310 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2311 * Which simplifies to:
2312 * uN + kN + k(N-1) + ... + k1 + k0
2313 * Note however, if we stop before we reach the ORIGIN we get:
2314 * uN + kN + k(N-1) + ... + kM - u(M-1)
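 *
 * For example (hypothetical figures, with s2 the newest snapshot): if
 * u0=5, u1=8, u2=10 and k0=1, k1=2, k2=3 blocks, then walking all the
 * way back to the ORIGIN moves u2 + k2 + k1 + k0 = 10 + 3 + 2 + 1 = 16
 * blocks.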
2315 */
2316 pa->used = origin_ds->ds_phys->ds_used_bytes;
2317 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2318 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2319 for (snap = list_head(&pa->shared_snaps); snap;
2320 snap = list_next(&pa->shared_snaps, snap)) {
2321 uint64_t val, dlused, dlcomp, dluncomp;
2322 dsl_dataset_t *ds = snap->ds;
2323
2324 /* Check that the snapshot name does not conflict */
2325 VERIFY(0 == dsl_dataset_get_snapname(ds));
2326 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2327 if (err == 0)
2328 return (EEXIST);
2329 if (err != ENOENT)
2330 return (err);
2331
2332 /* The very first snapshot does not have a deadlist */
2333 if (ds->ds_phys->ds_prev_snap_obj == 0)
2334 continue;
2335
2336 if ((err = bplist_space(&ds->ds_deadlist,
2337 &dlused, &dlcomp, &dluncomp)) != 0)
2338 return (err);
2339 pa->used += dlused;
2340 pa->comp += dlcomp;
2341 pa->uncomp += dluncomp;
2342 }
2343
2344 /*
2345 * If we are a clone of a clone then we never reached ORIGIN,
2346 * so we need to subtract out the clone origin's used space.
2347 */
2348 if (pa->origin_origin) {
2349 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2350 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2351 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2352 }
2353
2354 /* Check that there is enough space here */
2355 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2356 pa->used);
2357 if (err)
2358 return (err);
2359
2360 /*
2361 * Compute the amounts of space that will be used by snapshots
2362 * after the promotion (for both origin and clone). For each,
2363 * it is the amount of space that will be on all of their
2364 * deadlists (that was not born before their new origin).
2365 */
2366 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2367 uint64_t space;
2368
2369 /*
2370 * Note: typically this will not be a clone of a clone, so
2371 * snap->ds->ds_origin_txg will be < TXG_INITIAL; in that case
2372 * these snaplist_space() -> bplist_space_birthrange() calls
2373 * will be fast because they do not have to
2374 * iterate over all bps.
2375 */
2376 snap = list_head(&pa->origin_snaps);
2377 err = snaplist_space(&pa->shared_snaps,
2378 snap->ds->ds_origin_txg, &pa->cloneusedsnap);
2379 if (err)
2380 return (err);
2381
2382 err = snaplist_space(&pa->clone_snaps,
2383 snap->ds->ds_origin_txg, &space);
2384 if (err)
2385 return (err);
2386 pa->cloneusedsnap += space;
2387 }
2388 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2389 err = snaplist_space(&pa->origin_snaps,
2390 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2391 if (err)
2392 return (err);
2393 }
2394
2395 return (0);
2396 }
2397
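/*
 * Perform the promotion: point the origin snapshot's next-snap at the
 * clone's oldest snapshot, update both dsl_dirs' origin pointers (the
 * promoted dataset takes over the old head's origin, if any, and the
 * old head becomes a clone of the promoted dataset), move the shared
 * snapshots and their snapnames zap entries into the promoted
 * dataset's dsl_dir, and transfer the corresponding space accounting.
 */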
2398 static void
2399 dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2400 {
2401 dsl_dataset_t *hds = arg1;
2402 struct promotearg *pa = arg2;
2403 struct promotenode *snap = list_head(&pa->shared_snaps);
2404 dsl_dataset_t *origin_ds = snap->ds;
2405 dsl_dataset_t *origin_head;
2406 dsl_dir_t *dd = hds->ds_dir;
2407 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2408 dsl_dir_t *odd = NULL;
2409 uint64_t oldnext_obj;
2410 int64_t delta;
2411
2412 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2413
2414 snap = list_head(&pa->origin_snaps);
2415 origin_head = snap->ds;
2416
2417 /*
2418 * We need to explicitly open odd, since origin_ds's dd will be
2419 * changing.
2420 */
2421 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2422 NULL, FTAG, &odd));
2423
2424 /* change origin's next snap */
2425 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2426 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2427 snap = list_tail(&pa->clone_snaps);
2428 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2429 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2430
2431 /* change the origin's next clone */
2432 if (origin_ds->ds_phys->ds_next_clones_obj) {
2433 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2434 origin_ds->ds_phys->ds_next_clones_obj,
2435 origin_ds->ds_phys->ds_next_snap_obj, tx));
2436 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2437 origin_ds->ds_phys->ds_next_clones_obj,
2438 oldnext_obj, tx));
2439 }
2440
2441 /* change origin */
2442 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2443 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2444 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2445 hds->ds_origin_txg = origin_head->ds_origin_txg;
2446 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2447 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2448 origin_head->ds_origin_txg = origin_ds->ds_phys->ds_creation_txg;
2449
2450 /* move snapshots to this dir */
2451 for (snap = list_head(&pa->shared_snaps); snap;
2452 snap = list_next(&pa->shared_snaps, snap)) {
2453 dsl_dataset_t *ds = snap->ds;
2454
2455 /* unregister props as dsl_dir is changing */
2456 if (ds->ds_user_ptr) {
2457 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
2458 ds->ds_user_ptr = NULL;
2459 }
2460 /* move snap name entry */
2461 VERIFY(0 == dsl_dataset_get_snapname(ds));
2462 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2463 ds->ds_snapname, tx));
2464 VERIFY(0 == zap_add(dp->dp_meta_objset,
2465 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2466 8, 1, &ds->ds_object, tx));
2467 /* change containing dsl_dir */
2468 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2469 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2470 ds->ds_phys->ds_dir_obj = dd->dd_object;
2471 ASSERT3P(ds->ds_dir, ==, odd);
2472 dsl_dir_close(ds->ds_dir, ds);
2473 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2474 NULL, ds, &ds->ds_dir));
2475
2476 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2477 }
2478
2479 /*
2480 * Change space accounting.
2481 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2482 * both be valid, or both be 0 (resulting in delta == 0). This
2483 * is true for each of {clone,origin} independently.
2484 */
2485
2486 delta = pa->cloneusedsnap -
2487 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2488 ASSERT3S(delta, >=, 0);
2489 ASSERT3U(pa->used, >=, delta);
2490 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2491 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2492 pa->used - delta, pa->comp, pa->uncomp, tx);
2493
2494 delta = pa->originusedsnap -
2495 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2496 ASSERT3S(delta, <=, 0);
2497 ASSERT3U(pa->used, >=, -delta);
2498 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2499 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2500 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2501
2502 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2503
2504 /* log history record */
2505 spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2506 cr, "dataset = %llu", hds->ds_object);
2507
2508 dsl_dir_close(odd, FTAG);
2509 }
2510
2511 static char *snaplist_tag = "snaplist";
2512 /*
2513 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2514 * (exclusive) and last_obj (inclusive). The list will be in reverse
2515 * order (last_obj will be the list_head()). If first_obj == 0, do all
2516 * snapshots back to this dataset's origin.
2517 */
2518 static int
2519 snaplist_make(dsl_pool_t *dp, boolean_t own,
2520 uint64_t first_obj, uint64_t last_obj, list_t *l)
2521 {
2522 uint64_t obj = last_obj;
2523
2524 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2525
2526 list_create(l, sizeof (struct promotenode),
2527 offsetof(struct promotenode, link));
2528
2529 while (obj != first_obj) {
2530 dsl_dataset_t *ds;
2531 struct promotenode *snap;
2532 int err;
2533
2534 if (own) {
2535 err = dsl_dataset_own_obj(dp, obj,
2536 0, snaplist_tag, &ds);
2537 if (err == 0)
2538 dsl_dataset_make_exclusive(ds, snaplist_tag);
2539 } else {
2540 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2541 }
2542 if (err == ENOENT) {
2543 /* lost race with snapshot destroy */
2544 struct promotenode *last = list_tail(l);
2545 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2546 obj = last->ds->ds_phys->ds_prev_snap_obj;
2547 continue;
2548 } else if (err) {
2549 return (err);
2550 }
2551
2552 if (first_obj == 0)
2553 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2554
2555 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2556 snap->ds = ds;
2557 list_insert_tail(l, snap);
2558 obj = ds->ds_phys->ds_prev_snap_obj;
2559 }
2560
2561 return (0);
2562 }
2563
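/*
 * Sum, across every snapshot in the list, the deadlist space that was
 * born at or after 'mintxg'; return the total in *spacep.
 */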
2564 static int
2565 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2566 {
2567 struct promotenode *snap;
2568
2569 *spacep = 0;
2570 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2571 uint64_t used;
2572 int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2573 mintxg, UINT64_MAX, &used);
2574 if (err)
2575 return (err);
2576 *spacep += used;
2577 }
2578 return (0);
2579 }
2580
2581 static void
2582 snaplist_destroy(list_t *l, boolean_t own)
2583 {
2584 struct promotenode *snap;
2585
2586 if (!list_link_active(&l->list_head))
2587 return;
2588
2589 while ((snap = list_tail(l)) != NULL) {
2590 list_remove(l, snap);
2591 if (own)
2592 dsl_dataset_disown(snap->ds, snaplist_tag);
2593 else
2594 dsl_dataset_rele(snap->ds, snaplist_tag);
2595 kmem_free(snap, sizeof (struct promotenode));
2596 }
2597 list_destroy(l);
2598 }
2599
2600 /*
2601 * Promote a clone. Nomenclature note:
2602 * "clone" or "cds": the original clone which is being promoted
2603 * "origin" or "ods": the snapshot which is originally the clone's origin
2604 * "origin head" or "ohds": the dataset which is the head
2605 * (filesystem/volume) for the origin
2606 * "origin origin": the origin of the origin's filesystem (typically
2607 * NULL, indicating that the clone is not a clone of a clone).
2608 */
2609 int
2610 dsl_dataset_promote(const char *name)
2611 {
2612 dsl_dataset_t *ds;
2613 dsl_dir_t *dd;
2614 dsl_pool_t *dp;
2615 dmu_object_info_t doi;
2616 struct promotearg pa = { 0 };
2617 struct promotenode *snap;
2618 int err;
2619
2620 err = dsl_dataset_hold(name, FTAG, &ds);
2621 if (err)
2622 return (err);
2623 dd = ds->ds_dir;
2624 dp = dd->dd_pool;
2625
2626 err = dmu_object_info(dp->dp_meta_objset,
2627 ds->ds_phys->ds_snapnames_zapobj, &doi);
2628 if (err) {
2629 dsl_dataset_rele(ds, FTAG);
2630 return (err);
2631 }
2632
2633 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2634 dsl_dataset_rele(ds, FTAG);
2635 return (EINVAL);
2636 }
2637
2638 /*
2639 * We are going to inherit all the snapshots taken before our
2640 * origin (i.e., our new origin will be our parent's origin).
2641 * Take ownership of them so that we can rename them into our
2642 * namespace.
2643 */
2644 rw_enter(&dp->dp_config_rwlock, RW_READER);
2645
2646 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2647 &pa.shared_snaps);
2648 if (err != 0)
2649 goto out;
2650
2651 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2652 if (err != 0)
2653 goto out;
2654
2655 snap = list_head(&pa.shared_snaps);
2656 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2657 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2658 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2659 if (err != 0)
2660 goto out;
2661
2662 if (dsl_dir_is_clone(snap->ds->ds_dir)) {
2663 err = dsl_dataset_own_obj(dp,
2664 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2665 0, FTAG, &pa.origin_origin);
2666 if (err != 0)
2667 goto out;
2668 }
2669
2670 out:
2671 rw_exit(&dp->dp_config_rwlock);
2672
2673 /*
2674 * Add in 128x the snapnames zapobj size, since we will be moving
2675 * a bunch of snapnames to the promoted ds, and dirtying their
2676 * bonus buffers.
2677 */
2678 if (err == 0) {
2679 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2680 dsl_dataset_promote_sync, ds, &pa,
2681 2 + 2 * doi.doi_physical_blks);
2682 }
2683
2684 snaplist_destroy(&pa.shared_snaps, B_TRUE);
2685 snaplist_destroy(&pa.clone_snaps, B_FALSE);
2686 snaplist_destroy(&pa.origin_snaps, B_FALSE);
2687 if (pa.origin_origin)
2688 dsl_dataset_disown(pa.origin_origin, FTAG);
2689 dsl_dataset_rele(ds, FTAG);
2690 return (err);
2691 }
2692
2693 struct cloneswaparg {
2694 dsl_dataset_t *cds; /* clone dataset */
2695 dsl_dataset_t *ohds; /* origin's head dataset */
2696 boolean_t force;
2697 int64_t unused_refres_delta; /* change in unconsumed refreservation */
2698 };
2699
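/*
 * Sanity checks for the clone swap: both datasets must be heads sharing
 * the same branch point, the clone must be a child of the origin head,
 * the origin head must be unmodified since its last snapshot (unless
 * 'force'), and there must be room for any growth in unconsumed
 * refreservation.
 */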
2700 /* ARGSUSED */
2701 static int
2702 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2703 {
2704 struct cloneswaparg *csa = arg1;
2705
2706 /* they should both be heads */
2707 if (dsl_dataset_is_snapshot(csa->cds) ||
2708 dsl_dataset_is_snapshot(csa->ohds))
2709 return (EINVAL);
2710
2711 /* the branch point should be just before them */
2712 if (csa->cds->ds_prev != csa->ohds->ds_prev)
2713 return (EINVAL);
2714
2715 /* cds should be the clone */
2716 if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj !=
2717 csa->ohds->ds_object)
2718 return (EINVAL);
2719
2720 /* the clone should be a child of the origin */
2721 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2722 return (EINVAL);
2723
2724 /* ohds shouldn't be modified unless 'force' */
2725 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2726 return (ETXTBSY);
2727
2728 /* adjust amount of any unconsumed refreservation */
2729 csa->unused_refres_delta =
2730 (int64_t)MIN(csa->ohds->ds_reserved,
2731 csa->ohds->ds_phys->ds_unique_bytes) -
2732 (int64_t)MIN(csa->ohds->ds_reserved,
2733 csa->cds->ds_phys->ds_unique_bytes);
2734
2735 if (csa->unused_refres_delta > 0 &&
2736 csa->unused_refres_delta >
2737 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2738 return (ENOSPC);
2739
2740 return (0);
2741 }
2742
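/*
 * Carry out the swap: exchange the two datasets' root block pointers,
 * byte counts and deadlists, recompute the branch point's unique bytes,
 * and move the corresponding space accounting (including any change in
 * unconsumed refreservation) between the two dsl_dirs.
 */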
2743 /* ARGSUSED */
2744 static void
2745 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2746 {
2747 struct cloneswaparg *csa = arg1;
2748 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
2749
2750 ASSERT(csa->cds->ds_reserved == 0);
2751 ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota);
2752
2753 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
2754 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
2755 dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx);
2756
2757 if (csa->cds->ds_user_ptr != NULL) {
2758 csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr);
2759 csa->cds->ds_user_ptr = NULL;
2760 }
2761
2762 if (csa->ohds->ds_user_ptr != NULL) {
2763 csa->ohds->ds_user_evict_func(csa->ohds,
2764 csa->ohds->ds_user_ptr);
2765 csa->ohds->ds_user_ptr = NULL;
2766 }
2767
2768 /* reset origin's unique bytes */
2769 VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2770 csa->cds->ds_prev->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2771 &csa->cds->ds_prev->ds_phys->ds_unique_bytes));
2772
2773 /* swap blkptrs */
2774 {
2775 blkptr_t tmp;
2776 tmp = csa->ohds->ds_phys->ds_bp;
2777 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
2778 csa->cds->ds_phys->ds_bp = tmp;
2779 }
2780
2781 /* set dd_*_bytes */
2782 {
2783 int64_t dused, dcomp, duncomp;
2784 uint64_t cdl_used, cdl_comp, cdl_uncomp;
2785 uint64_t odl_used, odl_comp, odl_uncomp;
2786
2787 ASSERT3U(csa->cds->ds_dir->dd_phys->
2788 dd_used_breakdown[DD_USED_SNAP], ==, 0);
2789
2790 VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
2791 &cdl_comp, &cdl_uncomp));
2792 VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
2793 &odl_comp, &odl_uncomp));
2794
2795 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
2796 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
2797 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
2798 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
2799 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
2800 cdl_uncomp -
2801 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
2802
2803 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
2804 dused, dcomp, duncomp, tx);
2805 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
2806 -dused, -dcomp, -duncomp, tx);
2807
2808 /*
2809 * The difference in the space used by snapshots is the
2810 * difference in snapshot space due to the head's
2811 * deadlist (since that's the only thing that's
2812 * changing that affects the snapused).
2813 */
2814 VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2815 csa->ohds->ds_origin_txg, UINT64_MAX, &cdl_used));
2816 VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
2817 csa->ohds->ds_origin_txg, UINT64_MAX, &odl_used));
2818 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
2819 DD_USED_HEAD, DD_USED_SNAP, tx);
2820 }
2821
2822 #define SWITCH64(x, y) \
2823 { \
2824 uint64_t __tmp = (x); \
2825 (x) = (y); \
2826 (y) = __tmp; \
2827 }
2828
2829 /* swap ds_*_bytes */
2830 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
2831 csa->cds->ds_phys->ds_used_bytes);
2832 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
2833 csa->cds->ds_phys->ds_compressed_bytes);
2834 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
2835 csa->cds->ds_phys->ds_uncompressed_bytes);
2836 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
2837 csa->cds->ds_phys->ds_unique_bytes);
2838
2839 /* apply any parent delta for change in unconsumed refreservation */
2840 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
2841 csa->unused_refres_delta, 0, 0, tx);
2842
2843 /* swap deadlists */
2844 bplist_close(&csa->cds->ds_deadlist);
2845 bplist_close(&csa->ohds->ds_deadlist);
2846 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
2847 csa->cds->ds_phys->ds_deadlist_obj);
2848 VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
2849 csa->cds->ds_phys->ds_deadlist_obj));
2850 VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
2851 csa->ohds->ds_phys->ds_deadlist_obj));
2852
2853 dsl_pool_ds_clone_swapped(csa->ohds, csa->cds, tx);
2854 }
2855
2856 /*
2857 * Swap 'clone' with its origin head file system. Used at the end
2858 * of "online recv" to swizzle the file system to the new version.
2859 */
2860 int
2861 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2862 boolean_t force)
2863 {
2864 struct cloneswaparg csa;
2865 int error;
2866
2867 ASSERT(clone->ds_owner);
2868 ASSERT(origin_head->ds_owner);
2869 retry:
2870 /* Need exclusive access for the swap */
2871 rw_enter(&clone->ds_rwlock, RW_WRITER);
2872 if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2873 rw_exit(&clone->ds_rwlock);
2874 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2875 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2876 rw_exit(&origin_head->ds_rwlock);
2877 goto retry;
2878 }
2879 }
2880 csa.cds = clone;
2881 csa.ohds = origin_head;
2882 csa.force = force;
2883 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
2884 dsl_dataset_clone_swap_check,
2885 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
2886 return (error);
2887 }
2888
2889 /*
2890 * Given a pool name and a dataset object number in that pool,
2891 * return the name of that dataset.
2892 */
2893 int
2894 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2895 {
2896 spa_t *spa;
2897 dsl_pool_t *dp;
2898 dsl_dataset_t *ds;
2899 int error;
2900
2901 if ((error = spa_open(pname, &spa, FTAG)) != 0)
2902 return (error);
2903 dp = spa_get_dsl(spa);
2904 rw_enter(&dp->dp_config_rwlock, RW_READER);
2905 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
2906 dsl_dataset_name(ds, buf);
2907 dsl_dataset_rele(ds, FTAG);
2908 }
2909 rw_exit(&dp->dp_config_rwlock);
2910 spa_close(spa, FTAG);
2911
2912 return (error);
2913 }
2914
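/*
 * Check whether a write of 'asize' bytes (with 'inflight' bytes already
 * pending) would push this dataset over its refquota.  Returns EDQUOT
 * if it definitely would, ERESTART if the caller should back off and
 * retry once pending frees have synced, and 0 otherwise.  *ref_rsrv is
 * set to the portion of asize satisfied by unconsumed refreservation.
 */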
2915 int
2916 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
2917 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
2918 {
2919 int error = 0;
2920
2921 ASSERT3S(asize, >, 0);
2922
2923 /*
2924 * *ref_rsrv is the portion of asize that will come from any
2925 * unconsumed refreservation space.
2926 */
2927 *ref_rsrv = 0;
2928
2929 mutex_enter(&ds->ds_lock);
2930 /*
2931 * Make a space adjustment for reserved bytes.
2932 */
2933 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
2934 ASSERT3U(*used, >=,
2935 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2936 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2937 *ref_rsrv =
2938 asize - MIN(asize, parent_delta(ds, asize + inflight));
2939 }
2940
2941 if (!check_quota || ds->ds_quota == 0) {
2942 mutex_exit(&ds->ds_lock);
2943 return (0);
2944 }
2945 /*
2946 * If they are requesting more space, and our current estimate
2947 * is over quota, they get to try again unless the actual
2948 * on-disk usage is over quota and there are no pending changes (which
2949 * may free up space for us).
2950 */
2951 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
2952 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
2953 error = ERESTART;
2954 else
2955 error = EDQUOT;
2956 }
2957 mutex_exit(&ds->ds_lock);
2958
2959 return (error);
2960 }
2961
2962 /* ARGSUSED */
2963 static int
2964 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
2965 {
2966 dsl_dataset_t *ds = arg1;
2967 uint64_t *quotap = arg2;
2968 uint64_t new_quota = *quotap;
2969
2970 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
2971 return (ENOTSUP);
2972
2973 if (new_quota == 0)
2974 return (0);
2975
2976 if (new_quota < ds->ds_phys->ds_used_bytes ||
2977 new_quota < ds->ds_reserved)
2978 return (ENOSPC);
2979
2980 return (0);
2981 }
2982
2983 /* ARGSUSED */
2984 void
2985 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2986 {
2987 dsl_dataset_t *ds = arg1;
2988 uint64_t *quotap = arg2;
2989 uint64_t new_quota = *quotap;
2990
2991 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2992
2993 ds->ds_quota = new_quota;
2994
2995 dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx);
2996
2997 spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa,
2998 tx, cr, "%lld dataset = %llu ",
2999 (longlong_t)new_quota, ds->ds_object);
3000 }
3001
3002 int
3003 dsl_dataset_set_quota(const char *dsname, uint64_t quota)
3004 {
3005 dsl_dataset_t *ds;
3006 int err;
3007
3008 err = dsl_dataset_hold(dsname, FTAG, &ds);
3009 if (err)
3010 return (err);
3011
3012 if (quota != ds->ds_quota) {
3013 /*
3014 * If someone removes a file and then tries to set the quota, we
3015 * want to make sure the file freeing takes effect first.
3016 */
3017 txg_wait_open(ds->ds_dir->dd_pool, 0);
3018
3019 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3020 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3021 ds, &quota, 0);
3022 }
3023 dsl_dataset_rele(ds, FTAG);
3024 return (err);
3025 }
3026
3027 static int
3028 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3029 {
3030 dsl_dataset_t *ds = arg1;
3031 uint64_t *reservationp = arg2;
3032 uint64_t new_reservation = *reservationp;
3033 uint64_t unique;
3034
3035 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3036 SPA_VERSION_REFRESERVATION)
3037 return (ENOTSUP);
3038
3039 if (dsl_dataset_is_snapshot(ds))
3040 return (EINVAL);
3041
3042 /*
3043 * If we are doing the preliminary check in open context, the
3044 * space estimates may be inaccurate.
3045 */
3046 if (!dmu_tx_is_syncing(tx))
3047 return (0);
3048
3049 mutex_enter(&ds->ds_lock);
3050 unique = dsl_dataset_unique(ds);
3051 mutex_exit(&ds->ds_lock);
3052
3053 if (MAX(unique, new_reservation) > MAX(unique, ds->ds_reserved)) {
3054 uint64_t delta = MAX(unique, new_reservation) -
3055 MAX(unique, ds->ds_reserved);
3056
3057 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3058 return (ENOSPC);
3059 if (ds->ds_quota > 0 &&
3060 new_reservation > ds->ds_quota)
3061 return (ENOSPC);
3062 }
3063
3064 return (0);
3065 }
3066
3067 /* ARGSUSED */
3068 static void
3069 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
3070 dmu_tx_t *tx)
3071 {
3072 dsl_dataset_t *ds = arg1;
3073 uint64_t *reservationp = arg2;
3074 uint64_t new_reservation = *reservationp;
3075 uint64_t unique;
3076 int64_t delta;
3077
3078 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3079
3080 mutex_enter(&ds->ds_dir->dd_lock);
3081 mutex_enter(&ds->ds_lock);
3082 unique = dsl_dataset_unique(ds);
3083 delta = MAX(0, (int64_t)(new_reservation - unique)) -
3084 MAX(0, (int64_t)(ds->ds_reserved - unique));
3085 ds->ds_reserved = new_reservation;
3086 mutex_exit(&ds->ds_lock);
3087
3088 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3089 mutex_exit(&ds->ds_dir->dd_lock);
3090 dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation",
3091 new_reservation, cr, tx);
3092
3093 spa_history_internal_log(LOG_DS_REFRESERV,
3094 ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
3095 (longlong_t)new_reservation, ds->ds_object);
3096 }
3097
3098 int
3099 dsl_dataset_set_reservation(const char *dsname, uint64_t reservation)
3100 {
3101 dsl_dataset_t *ds;
3102 int err;
3103
3104 err = dsl_dataset_hold(dsname, FTAG, &ds);
3105 if (err)
3106 return (err);
3107
3108 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3109 dsl_dataset_set_reservation_check,
3110 dsl_dataset_set_reservation_sync, ds, &reservation, 0);
3111 dsl_dataset_rele(ds, FTAG);
3112 return (err);
3113 }