/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_phys_t zb;

		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
		}
		if (err != 0) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf));
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	list_link_init(&os->os_evicting_node);

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}
/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	dsl_pool_rele(dp, FTAG);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && ds->ds_is_snapshot) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}
void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or ownership
 * changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds, *newds;
	char name[MAXNAMELEN];

	ds = os->os_dsl_dataset;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = dmu_objset_pool(os);
	dsl_pool_config_enter(dp, FTAG);
	dmu_objset_disown(os, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
	VERIFY3P(newds, ==, os->os_dsl_dataset);
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}
void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn_marker;
	dnode_t *dn;

	dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, dn_marker);
			list_remove(&os->os_dnodes, dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	kmem_free(dn_marker, sizeof (dnode_t));

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	int t;

	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!ds->ds_is_snapshot) {
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    checksum_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
			    compression_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COPIES),
			    copies_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    dedup_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
			    logbias_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_SYNC),
			    sync_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_REDUNDANT_METADATA),
			    redundant_metadata_changed_cb, os));
		}
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os));
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
		    secondary_cache_changed_cb, os));
	}

	if (os->os_sa)
		sa_tear_down(os);

	os->os_evicting = B_TRUE;
	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}
void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf));

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}
typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	dsl_dir_rele(pdd, FTAG);

	return (error);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca, 5));
}
typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	/* You can't clone across pools. */
	if (pdd->dd_pool != dp) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EXDEV));
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can't clone across pools. */
	if (origin->ds_dir->dd_pool != dp) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[MAXNAMELEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca, 5));
}
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}
static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = list_head(list))) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/*ARGSUSED*/
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	int i;

	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT3P(bp, ==, os->os_rootbp);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
}

/*ARGSUSED*/
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
	    NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list))) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}
static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}
void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while ((dn = list_head(list))) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta is zero.  Otherwise
		 * the block of the zap obj could be shared between
		 * datasets but need to be different between them after
		 * a bprewrite.
		 */

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE, tx);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}
/*
 * Returns a pointer to data to find uid/gid from
 *
 * If a dirty record for transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_FIRST, real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
	return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
/*
 * Find objsets under and including ddobj, call func(ds) on each.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
	if (err != 0)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			err = dmu_objset_find_dp(dp, attr->za_first_integer,
			    func, arg, flags);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = func(dp, ds, arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0)
		return (err);

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
	if (err != 0)
		return (err);
	err = func(dp, ds, arg);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}
/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}
/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least MAXNAMELEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);

EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_do_userquota_updates);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
#endif