/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}
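
/*
 * Property callbacks, registered through dsl_prop_register(), cache the
 * current value of each relevant dataset property in the objset_t so the
 * I/O path can consult them without a property lookup.
 */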
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	zil_set_sync(os->os_zil, newval);
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	zil_set_logbias(os->os_zil, newval);
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}
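
/*
 * Instantiate an in-core objset_t from its on-disk objset_phys_t: read the
 * root block (if any), register the property callbacks, and open the special
 * meta/userused/groupused dnodes.
 */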
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_PUSHPAGE);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;

		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = EIO;
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds, "primarycache",
		    primary_cache_changed_cb, os);
		if (err == 0)
			err = dsl_prop_register(ds, "secondarycache",
			    secondary_cache_changed_cb, os);
		if (!dsl_dataset_is_snapshot(ds)) {
			if (err == 0)
				err = dsl_prop_register(ds, "checksum",
				    checksum_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "compression",
				    compression_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "copies",
				    copies_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "dedup",
				    dedup_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "logbias",
				    logbias_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "sync",
				    sync_changed_cb, os);
		}
		if (err) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf) == 1);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = 0;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !dsl_dataset_is_snapshot(ds))
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	DMU_META_DNODE(os) = dnode_special_open(os,
	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
	    &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		DMU_USERUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
		    &os->os_userused_dnode);
		DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
		    &os->os_groupused_dnode);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_objset == NULL);
		ds->ds_objset = os;
		mutex_exit(&ds->ds_lock);
	}

	*osp = os;
	return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	*osp = ds->ds_objset;
	if (*osp == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), osp);
	}
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
/* called from zpl */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(name, tag, &ds);
	if (err)
		return (err);

	err = dmu_objset_from_ds(ds, osp);
	if (err)
		dsl_dataset_rele(ds, tag);

	return (err);
}

/* called from zpl */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
	if (err)
		return (err);

	err = dmu_objset_from_ds(ds, osp);
	if (err) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dmu_objset_disown(*osp, tag);
		return (EINVAL);
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dmu_objset_disown(*osp, tag);
		return (EROFS);
	}
	return (err);
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_dataset_rele(os->os_dsl_dataset, tag);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}
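
/*
 * Evict all dbufs belonging to this objset.  Returns B_TRUE if dnodes other
 * than the meta-dnode still hold references, i.e. eviction is incomplete.
 */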
boolean_t
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn;

	mutex_enter(&os->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&os->os_dnodes, DMU_META_DNODE(os));
	list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&os->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&os->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&os->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&os->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&os->os_lock);
		dn = next_dn;
	}
	dn = list_head(&os->os_dnodes);
	mutex_exit(&os->os_lock);
	return (dn != DMU_META_DNODE(os));
}
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
			    checksum_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "compression",
			    compression_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "copies",
			    copies_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
			    dedup_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
			    logbias_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "sync",
			    sync_changed_cb, os));
		}
		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
		    primary_cache_changed_cb, os));
		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
		    secondary_cache_changed_cb, os));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	(void) dmu_objset_evict_dbufs(os);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	if (ds != NULL)
		VERIFY(0 == dmu_objset_from_ds(ds, &os));
	else
		VERIFY(0 == dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}
struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_origin;
	const char *lastname;
	dmu_objset_type_t type;
	uint64_t flags;
	cred_t *cr;
};
/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_origin != NULL) {
		/* You can't clone across pools. */
		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/* You can only clone snapshots, not the head datasets. */
		if (!dsl_dataset_is_snapshot(oa->clone_origin))
			return (EINVAL);
	}

	return (0);
}
static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	spa_t *spa = dd->dd_pool->dp_spa;
	struct oscarg *oa = arg2;
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	obj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_origin, oa->flags, oa->cr, tx);

	if (oa->clone_origin == NULL) {
		dsl_pool_t *dp = dd->dd_pool;
		dsl_dataset_t *ds;
		blkptr_t *bp;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
		bp = dsl_dataset_get_blkptr(ds);
		ASSERT(BP_IS_HOLE(bp));

		os = dmu_objset_create_impl(spa, ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(os, oa->userarg, oa->cr, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	spa_history_log_internal(LOG_DS_CREATE, spa, tx, "dataset = %llu", obj);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	oa.flags = flags;
	oa.cr = CRED();

	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}
int
dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	oa.lastname = tail;
	oa.clone_origin = clone_origin;
	oa.flags = flags;
	oa.cr = CRED();

	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}
int
dmu_objset_destroy(const char *name, boolean_t defer)
{
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
	if (error == 0) {
		error = dsl_dataset_destroy(ds, FTAG, defer);
		/* dsl_dataset_destroy() closes the ds. */
	}

	return (error);
}

struct snaparg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *htag;
	char failed[MAXPATHLEN];
	boolean_t recursive;
	boolean_t needsuspend;
	boolean_t temporary;
	nvlist_t *props;
	struct dsl_ds_holdarg *ha;	/* only needed in the temporary case */
	dsl_dataset_t *newds;
};
static int
snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	struct snaparg *sn = arg2;
	int error;

	/* The props have already been checked by zfs_check_userprops(). */

	error = dsl_dataset_snapshot_check(os->os_dsl_dataset,
	    sn->snapname, tx);
	if (error)
		return (error);

	if (sn->temporary) {
		/*
		 * Ideally we would just call
		 * dsl_dataset_user_hold_check() and
		 * dsl_dataset_destroy_check() here.  However the
		 * dataset we want to hold and destroy is the snapshot
		 * that we just confirmed we can create, but it won't
		 * exist until after these checks are run.  Do any
		 * checks we can here and if more checks are added to
		 * those routines in the future, similar checks may be
		 * necessary here.
		 */
		if (spa_version(os->os_spa) < SPA_VERSION_USERREFS)
			return (ENOTSUP);
		/*
		 * Not checking number of tags because the tag will be
		 * unique, as it will be the only tag.
		 */
		if (strlen(sn->htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
			return (E2BIG);

		sn->ha = kmem_alloc(sizeof (struct dsl_ds_holdarg),
		    KM_PUSHPAGE);
		sn->ha->temphold = B_TRUE;
		sn->ha->htag = sn->htag;
	}
	return (error);
}
static void
snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	struct snaparg *sn = arg2;

	dsl_dataset_snapshot_sync(ds, sn->snapname, tx);

	if (sn->props) {
		dsl_props_arg_t pa;
		pa.pa_props = sn->props;
		pa.pa_source = ZPROP_SRC_LOCAL;
		dsl_props_set_sync(ds->ds_prev, &pa, tx);
	}

	if (sn->temporary) {
		struct dsl_ds_destroyarg da;

		dsl_dataset_user_hold_sync(ds->ds_prev, sn->ha, tx);
		kmem_free(sn->ha, sizeof (struct dsl_ds_holdarg));
		sn->ha = NULL;
		sn->newds = ds->ds_prev;

		da.ds = ds->ds_prev;
		da.defer = B_TRUE;
		dsl_dataset_destroy_sync(&da, FTAG, tx);
	}
}
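
/*
 * Per-dataset callback used by dmu_objset_snapshot(): validates the dataset,
 * suspends its ZIL when required, and queues a snapshot sync task for it.
 */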
static int
dmu_objset_snapshot_one(const char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	int err;
	char *cp;

	/*
	 * If the objset starts with a '%', then ignore it unless it was
	 * explicitly named (ie, not recursive).  These hidden datasets
	 * are always inconsistent, and by not opening them here, we can
	 * avoid a race with dsl_dir_destroy_check().
	 */
	cp = strrchr(name, '/');
	if (cp && cp[1] == '%' && sn->recursive)
		return (0);

	(void) strcpy(sn->failed, name);

	/*
	 * Check permissions if we are doing a recursive snapshot.  The
	 * permission checks for the starting dataset have already been
	 * performed in zfs_secpolicy_snapshot()
	 */
	if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
		return (err);

	err = dmu_objset_hold(name, sn, &os);
	if (err != 0)
		return (err);

	/*
	 * If the objset is in an inconsistent state (eg, in the process
	 * of being destroyed), don't snapshot it.  As with %hidden
	 * datasets, we return EBUSY if this name was explicitly
	 * requested (ie, not recursive), and otherwise ignore it.
	 */
	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
		dmu_objset_rele(os, sn);
		return (sn->recursive ? 0 : EBUSY);
	}

	if (sn->needsuspend) {
		err = zil_suspend(dmu_objset_zil(os));
		if (err) {
			dmu_objset_rele(os, sn);
			return (err);
		}
	}
	dsl_sync_task_create(sn->dstg, snapshot_check, snapshot_sync,
	    os, sn, 3);

	return (0);
}
int
dmu_objset_snapshot(char *fsname, char *snapname, char *tag,
    nvlist_t *props, boolean_t recursive, boolean_t temporary, int cleanup_fd)
{
	dsl_sync_task_t *dst;
	struct snaparg *sn;
	spa_t *spa;
	minor_t minor;
	int err;

	sn = kmem_alloc(sizeof (struct snaparg), KM_SLEEP);
	(void) strcpy(sn->failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(sn, sizeof (struct snaparg));
		return (err);
	}

	if (temporary) {
		if (cleanup_fd < 0) {
			spa_close(spa, FTAG);
			return (EINVAL);
		}
		if ((err = zfs_onexit_fd_hold(cleanup_fd, &minor)) != 0) {
			spa_close(spa, FTAG);
			return (err);
		}
	}

	sn->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn->snapname = snapname;
	sn->htag = tag;
	sn->props = props;
	sn->recursive = recursive;
	sn->needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
	sn->temporary = temporary;
	sn->ha = NULL;
	sn->newds = NULL;

	if (recursive) {
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, sn, DS_FIND_CHILDREN);
	} else {
		err = dmu_objset_snapshot_one(fsname, sn);
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(sn->dstg);

	for (dst = list_head(&sn->dstg->dstg_tasks); dst;
	    dst = list_next(&sn->dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		dsl_dataset_t *ds = os->os_dsl_dataset;
		if (dst->dst_err) {
			dsl_dataset_name(ds, sn->failed);
		} else if (temporary) {
			dsl_register_onexit_hold_cleanup(sn->newds, tag, minor);
		}
		if (sn->needsuspend)
			zil_resume(dmu_objset_zil(os));
		dmu_objset_rele(os, sn);
	}

	if (err)
		(void) strcpy(fsname, sn->failed);
	if (temporary)
		zfs_onexit_fd_rele(cleanup_fd);
	dsl_sync_task_group_destroy(sn->dstg);
	spa_close(spa, FTAG);
	kmem_free(sn, sizeof (struct snaparg));
	return (err);
}
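
/*
 * Move each dirty dnode from 'list' onto 'newlist' (when user accounting
 * will need to revisit it) and sync it out for this txg.
 */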
static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = list_head(list))) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	int i;

	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(bp == os->os_rootbp);
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
	ASSERT(BP_GET_LEVEL(bp) == 0);

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}
1086 dmu_objset_sync(objset_t
*os
, zio_t
*pio
, dmu_tx_t
*tx
)
1093 list_t
*newlist
= NULL
;
1094 dbuf_dirty_record_t
*dr
;
1096 dprintf_ds(os
->os_dsl_dataset
, "txg=%llu\n", tx
->tx_txg
);
1098 ASSERT(dmu_tx_is_syncing(tx
));
1099 /* XXX the write_done callback should really give us the tx... */
1102 if (os
->os_dsl_dataset
== NULL
) {
1104 * This is the MOS. If we have upgraded,
1105 * spa_max_replication() could change, so reset
1108 os
->os_copies
= spa_max_replication(os
->os_spa
);
1112 * Create the root block IO
1114 SET_BOOKMARK(&zb
, os
->os_dsl_dataset
?
1115 os
->os_dsl_dataset
->ds_object
: DMU_META_OBJSET
,
1116 ZB_ROOT_OBJECT
, ZB_ROOT_LEVEL
, ZB_ROOT_BLKID
);
1117 arc_release(os
->os_phys_buf
, &os
->os_phys_buf
);
1119 dmu_write_policy(os
, NULL
, 0, 0, &zp
);
1121 zio
= arc_write(pio
, os
->os_spa
, tx
->tx_txg
,
1122 os
->os_rootbp
, os
->os_phys_buf
, DMU_OS_IS_L2CACHEABLE(os
), &zp
,
1123 dmu_objset_write_ready
, dmu_objset_write_done
, os
,
1124 ZIO_PRIORITY_ASYNC_WRITE
, ZIO_FLAG_MUSTSUCCEED
, &zb
);
1127 * Sync special dnodes - the parent IO for the sync is the root block
1129 DMU_META_DNODE(os
)->dn_zio
= zio
;
1130 dnode_sync(DMU_META_DNODE(os
), tx
);
1132 os
->os_phys
->os_flags
= os
->os_flags
;
1134 if (DMU_USERUSED_DNODE(os
) &&
1135 DMU_USERUSED_DNODE(os
)->dn_type
!= DMU_OT_NONE
) {
1136 DMU_USERUSED_DNODE(os
)->dn_zio
= zio
;
1137 dnode_sync(DMU_USERUSED_DNODE(os
), tx
);
1138 DMU_GROUPUSED_DNODE(os
)->dn_zio
= zio
;
1139 dnode_sync(DMU_GROUPUSED_DNODE(os
), tx
);
1142 txgoff
= tx
->tx_txg
& TXG_MASK
;
1144 if (dmu_objset_userused_enabled(os
)) {
1145 newlist
= &os
->os_synced_dnodes
;
1147 * We must create the list here because it uses the
1148 * dn_dirty_link[] of this txg.
1150 list_create(newlist
, sizeof (dnode_t
),
1151 offsetof(dnode_t
, dn_dirty_link
[txgoff
]));
1154 dmu_objset_sync_dnodes(&os
->os_free_dnodes
[txgoff
], newlist
, tx
);
1155 dmu_objset_sync_dnodes(&os
->os_dirty_dnodes
[txgoff
], newlist
, tx
);
1157 list
= &DMU_META_DNODE(os
)->dn_dirty_records
[txgoff
];
1158 while ((dr
= list_head(list
)) != NULL
) {
1159 ASSERT(dr
->dr_dbuf
->db_level
== 0);
1160 list_remove(list
, dr
);
1162 zio_nowait(dr
->dr_zio
);
1165 * Free intent log blocks up to this tx.
1167 zil_sync(os
->os_zil
, tx
);
1168 os
->os_phys
->os_zil_header
= os
->os_zil_header
;
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}
static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}
*os
, dmu_tx_t
*tx
)
1214 list_t
*list
= &os
->os_synced_dnodes
;
1216 ASSERT(list_head(list
) == NULL
|| dmu_objset_userused_enabled(os
));
1218 while ((dn
= list_head(list
)) != NULL
) {
1220 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn
->dn_object
));
1221 ASSERT(dn
->dn_phys
->dn_type
== DMU_OT_NONE
||
1222 dn
->dn_phys
->dn_flags
&
1223 DNODE_FLAG_USERUSED_ACCOUNTED
);
1225 /* Allocate the user/groupused objects if necessary. */
1226 if (DMU_USERUSED_DNODE(os
)->dn_type
== DMU_OT_NONE
) {
1227 VERIFY(0 == zap_create_claim(os
,
1228 DMU_USERUSED_OBJECT
,
1229 DMU_OT_USERGROUP_USED
, DMU_OT_NONE
, 0, tx
));
1230 VERIFY(0 == zap_create_claim(os
,
1231 DMU_GROUPUSED_OBJECT
,
1232 DMU_OT_USERGROUP_USED
, DMU_OT_NONE
, 0, tx
));
1236 * We intentionally modify the zap object even if the
1237 * net delta is zero. Otherwise
1238 * the block of the zap obj could be shared between
1239 * datasets but need to be different between them after
1243 flags
= dn
->dn_id_flags
;
1245 if (flags
& DN_ID_OLD_EXIST
) {
1246 do_userquota_update(os
, dn
->dn_oldused
, dn
->dn_oldflags
,
1247 dn
->dn_olduid
, dn
->dn_oldgid
, B_TRUE
, tx
);
1249 if (flags
& DN_ID_NEW_EXIST
) {
1250 do_userquota_update(os
, DN_USED_BYTES(dn
->dn_phys
),
1251 dn
->dn_phys
->dn_flags
, dn
->dn_newuid
,
1252 dn
->dn_newgid
, B_FALSE
, tx
);
1255 mutex_enter(&dn
->dn_mtx
);
1257 dn
->dn_oldflags
= 0;
1258 if (dn
->dn_id_flags
& DN_ID_NEW_EXIST
) {
1259 dn
->dn_olduid
= dn
->dn_newuid
;
1260 dn
->dn_oldgid
= dn
->dn_newgid
;
1261 dn
->dn_id_flags
|= DN_ID_OLD_EXIST
;
1262 if (dn
->dn_bonuslen
== 0)
1263 dn
->dn_id_flags
|= DN_ID_CHKED_SPILL
;
1265 dn
->dn_id_flags
|= DN_ID_CHKED_BONUS
;
1267 dn
->dn_id_flags
&= ~(DN_ID_NEW_EXIST
);
1268 mutex_exit(&dn
->dn_mtx
);
1270 list_remove(list
, dn
);
1271 dnode_rele(dn
, list
);
/*
 * Returns a pointer to data to find uid/gid from
 *
 * If a dirty record for transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}
*dn
, boolean_t before
, dmu_tx_t
*tx
)
1318 objset_t
*os
= dn
->dn_objset
;
1320 dmu_buf_impl_t
*db
= NULL
;
1321 uint64_t *user
= NULL
, *group
= NULL
;
1322 int flags
= dn
->dn_id_flags
;
1324 boolean_t have_spill
= B_FALSE
;
1326 if (!dmu_objset_userused_enabled(dn
->dn_objset
))
1329 if (before
&& (flags
& (DN_ID_CHKED_BONUS
|DN_ID_OLD_EXIST
|
1330 DN_ID_CHKED_SPILL
)))
1333 if (before
&& dn
->dn_bonuslen
!= 0)
1334 data
= DN_BONUS(dn
->dn_phys
);
1335 else if (!before
&& dn
->dn_bonuslen
!= 0) {
1338 mutex_enter(&db
->db_mtx
);
1339 data
= dmu_objset_userquota_find_data(db
, tx
);
1341 data
= DN_BONUS(dn
->dn_phys
);
1343 } else if (dn
->dn_bonuslen
== 0 && dn
->dn_bonustype
== DMU_OT_SA
) {
1346 if (RW_WRITE_HELD(&dn
->dn_struct_rwlock
))
1347 rf
|= DB_RF_HAVESTRUCT
;
1348 error
= dmu_spill_hold_by_dnode(dn
,
1349 rf
| DB_RF_MUST_SUCCEED
,
1350 FTAG
, (dmu_buf_t
**)&db
);
1352 mutex_enter(&db
->db_mtx
);
1353 data
= (before
) ? db
->db
.db_data
:
1354 dmu_objset_userquota_find_data(db
, tx
);
1355 have_spill
= B_TRUE
;
1357 mutex_enter(&dn
->dn_mtx
);
1358 dn
->dn_id_flags
|= DN_ID_CHKED_BONUS
;
1359 mutex_exit(&dn
->dn_mtx
);
1365 user
= &dn
->dn_olduid
;
1366 group
= &dn
->dn_oldgid
;
1368 user
= &dn
->dn_newuid
;
1369 group
= &dn
->dn_newgid
;
1373 * Must always call the callback in case the object
1374 * type has changed and that type isn't an object type to track
1376 error
= used_cbs
[os
->os_phys
->os_type
](dn
->dn_bonustype
, data
,
1380 * Preserve existing uid/gid when the callback can't determine
1381 * what the new uid/gid are and the callback returned EEXIST.
1382 * The EEXIST error tells us to just use the existing uid/gid.
1383 * If we don't know what the old values are then just assign
1384 * them to 0, since that is a new file being created.
1386 if (!before
&& data
== NULL
&& error
== EEXIST
) {
1387 if (flags
& DN_ID_OLD_EXIST
) {
1388 dn
->dn_newuid
= dn
->dn_olduid
;
1389 dn
->dn_newgid
= dn
->dn_oldgid
;
1398 mutex_exit(&db
->db_mtx
);
1400 mutex_enter(&dn
->dn_mtx
);
1401 if (error
== 0 && before
)
1402 dn
->dn_id_flags
|= DN_ID_OLD_EXIST
;
1403 if (error
== 0 && !before
)
1404 dn
->dn_id_flags
|= DN_ID_NEW_EXIST
;
1407 dn
->dn_id_flags
|= DN_ID_CHKED_SPILL
;
1409 dn
->dn_id_flags
|= DN_ID_CHKED_BONUS
;
1411 mutex_exit(&dn
->dn_mtx
);
1413 dmu_buf_rele((dmu_buf_t
*)db
, FTAG
);
1417 dmu_objset_userspace_present(objset_t
*os
)
1419 return (os
->os_phys
->os_flags
&
1420 OBJSET_FLAG_USERACCOUNTING_COMPLETE
);
1424 dmu_objset_userspace_upgrade(objset_t
*os
)
1429 if (dmu_objset_userspace_present(os
))
1431 if (!dmu_objset_userused_enabled(os
))
1433 if (dmu_objset_is_snapshot(os
))
1437 * We simply need to mark every object dirty, so that it will be
1438 * synced out and now accounted. If this is called
1439 * concurrently, or if we already did some work before crashing,
1440 * that's fine, since we track each object's accounted state
1444 for (obj
= 0; err
== 0; err
= dmu_object_next(os
, &obj
, FALSE
, 0)) {
1449 if (issig(JUSTLOOKING
) && issig(FORREAL
))
1452 objerr
= dmu_bonus_hold(os
, obj
, FTAG
, &db
);
1455 tx
= dmu_tx_create(os
);
1456 dmu_tx_hold_bonus(tx
, obj
);
1457 objerr
= dmu_tx_assign(tx
, TXG_WAIT
);
1462 dmu_buf_will_dirty(db
, tx
);
1463 dmu_buf_rele(db
, FTAG
);
1467 os
->os_flags
|= OBJSET_FLAG_USERACCOUNTING_COMPLETE
;
1468 txg_wait_synced(dmu_objset_pool(os
), 0);
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}
int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
	return dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
struct findarg {
	int (*func)(const char *, void *);
	void *arg;
};

/* ARGSUSED */
static int
findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct findarg *fa = arg;
	return (fa->func(dsname, fa->arg));
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * Perhaps change all callers to use dmu_objset_find_spa()?
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	struct findarg fa;
	fa.func = func;
	fa.arg = arg;
	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
}
/*
 * Find all objsets under name, call func on each
 */
int
dmu_objset_find_spa(spa_t *spa, const char *name,
    int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	if (name == NULL)
		name = spa_name(spa);
	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_close(dd, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);
	dp = dd->dd_pool;

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr->za_integer_length == sizeof (uint64_t));
			ASSERT(attr->za_num_integers == 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			err = dmu_objset_find_spa(spa, child, func, arg, flags);
			strfree(child);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		if (!dsl_pool_sync_context(dp))
			rw_enter(&dp->dp_config_rwlock, RW_READER);
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
		if (!dsl_pool_sync_context(dp))
			rw_exit(&dp->dp_config_rwlock);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT(attr->za_integer_length ==
				    sizeof (uint64_t));
				ASSERT(attr->za_num_integers == 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				err = func(spa, attr->za_first_integer,
				    child, arg);
				strfree(child);
				if (err)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_close(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	err = func(spa, thisobj, name, arg);
	return (err);
}
int
dmu_objset_prefetch(const char *name, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_hold(name, FTAG, &ds))
		return (0);

	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
		mutex_enter(&ds->ds_opening_lock);
		if (ds->ds_objset == NULL) {
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
			    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

			(void) arc_read(NULL, dsl_dataset_get_spa(ds),
			    &ds->ds_phys->ds_bp, NULL, NULL,
			    ZIO_PRIORITY_ASYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		mutex_exit(&ds->ds_opening_lock);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_destroy);
EXPORT_SYMBOL(dmu_objset_snapshot);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_find_spa);
EXPORT_SYMBOL(dmu_objset_prefetch);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);

EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_do_userquota_updates);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
#endif