/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
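
/*
 * The matching use of os_lock is the rw_enter()/rw_exit() barrier in
 * dmu_objset_evict() below: eviction waits on the lock so that the objset
 * is not freed out from under a dnode_move() that is still holding it.
 */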
spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}
zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}
dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}
dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}
dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}
void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}
uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}
zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}
zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}
static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}
static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}
static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}
static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}
static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}
static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}
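
/*
 * OBJSET_OLD_PHYS_SIZE is the objset_phys_t size used before
 * SPA_VERSION_USERSPACE added the user/group accounting dnodes; see the
 * matching size selection in dmu_objset_open_impl() below.
 */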
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_PUSHPAGE);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;

		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = EIO;
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds, "primarycache",
		    primary_cache_changed_cb, os);
		if (err == 0)
			err = dsl_prop_register(ds, "secondarycache",
			    secondary_cache_changed_cb, os);
		if (!dsl_dataset_is_snapshot(ds)) {
			if (err == 0)
				err = dsl_prop_register(ds, "checksum",
				    checksum_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "compression",
				    compression_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "copies",
				    copies_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "dedup",
				    dedup_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "logbias",
				    logbias_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "sync",
				    sync_changed_cb, os);
		}
		if (err) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf) == 1);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = 0;
		os->os_logbias = 0;
		os->os_sync = 0;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !dsl_dataset_is_snapshot(ds))
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	DMU_META_DNODE(os) = dnode_special_open(os,
	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
	    &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		DMU_USERUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
		    &os->os_userused_dnode);
		DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
		    &os->os_groupused_dnode);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_objset == NULL);
		ds->ds_objset = os;
		mutex_exit(&ds->ds_lock);
	}

	*osp = os;
	return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	*osp = ds->ds_objset;
	if (*osp == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), osp);
	}
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
/* called from zpl */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(name, tag, &ds);
	if (err)
		return (err);

	err = dmu_objset_from_ds(ds, osp);
	if (err)
		dsl_dataset_rele(ds, tag);

	return (err);
}
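
/*
 * A sketch of the typical consumer pattern (hypothetical dataset name,
 * error handling elided):
 *
 *	objset_t *os;
 *
 *	if (dmu_objset_hold("pool/fs", FTAG, &os) == 0) {
 *		... use os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 */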
/* called from zpl */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
	if (err)
		return (err);

	err = dmu_objset_from_ds(ds, osp);
	if (err) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dmu_objset_disown(*osp, tag);
		return (EINVAL);
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dmu_objset_disown(*osp, tag);
		return (EROFS);
	}
	return (err);
}
void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_dataset_rele(os->os_dsl_dataset, tag);
}
void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}
int
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn;

	mutex_enter(&os->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&os->os_dnodes, DMU_META_DNODE(os));
	list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&os->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&os->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&os->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&os->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&os->os_lock);
		dn = next_dn;
	}
	dn = list_head(&os->os_dnodes);
	mutex_exit(&os->os_lock);
	return (dn != DMU_META_DNODE(os));
}
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
			    checksum_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "compression",
			    compression_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "copies",
			    copies_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
			    dedup_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
			    logbias_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "sync",
			    sync_changed_cb, os));
		}
		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
		    primary_cache_changed_cb, os));
		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
		    secondary_cache_changed_cb, os));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	(void) dmu_objset_evict_dbufs(os);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use. We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}
timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	if (ds != NULL)
		VERIFY(0 == dmu_objset_from_ds(ds, &os));
	else
		VERIFY(0 == dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}
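
/*
 * A worked example for the nlevels loop above (a sketch, assuming the
 * common constants DNODE_BLOCK_SHIFT = 14, DN_MAX_INDBLKSHIFT = 17 and
 * SPA_BLKPTRSHIFT = 7): each extra indirect level multiplies the range
 * the meta-dnode can address by 2^(17 - 7) = 1024, so levels are added
 * until dn_nblkptr << (14 + (levels - 1) * 10) covers
 * DN_MAX_OBJECT * sizeof (dnode_phys_t) bytes of dnode storage.
 */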
struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_origin;
	const char *lastname;
	dmu_objset_type_t type;
	uint64_t flags;
	cred_t *cr;
};
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_origin != NULL) {
		/* You can't clone across pools. */
		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/* You can only clone snapshots, not the head datasets. */
		if (!dsl_dataset_is_snapshot(oa->clone_origin))
			return (EINVAL);
	}

	return (0);
}
static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	spa_t *spa = dd->dd_pool->dp_spa;
	struct oscarg *oa = arg2;
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	obj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_origin, oa->flags, oa->cr, tx);

	if (oa->clone_origin == NULL) {
		dsl_pool_t *dp = dd->dd_pool;
		dsl_dataset_t *ds;
		blkptr_t *bp;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
		bp = dsl_dataset_get_blkptr(ds);
		ASSERT(BP_IS_HOLE(bp));

		os = dmu_objset_create_impl(spa, ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(os, oa->userarg, oa->cr, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	spa_history_log_internal(LOG_DS_CREATE, spa, tx, "dataset = %llu", obj);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	oa.flags = flags;
	oa.cr = CRED();

	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}
int
dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	oa.lastname = tail;
	oa.clone_origin = clone_origin;
	oa.flags = flags;
	oa.cr = CRED();

	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}
int
dmu_objset_destroy(const char *name, boolean_t defer)
{
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
	if (error == 0) {
		error = dsl_dataset_destroy(ds, FTAG, defer);
		/* dsl_dataset_destroy() closes the ds. */
	}

	return (error);
}
struct snaparg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *htag;
	char failed[MAXPATHLEN];
	boolean_t recursive;
	boolean_t needsuspend;
	boolean_t temporary;
	nvlist_t *props;
	struct dsl_ds_holdarg *ha;	/* only needed in the temporary case */
	dsl_dataset_t *newds;
};
static int
snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	struct snaparg *sn = arg2;
	int error;

	/* The props have already been checked by zfs_check_userprops(). */

	error = dsl_dataset_snapshot_check(os->os_dsl_dataset,
	    sn->snapname, tx);
	if (error)
		return (error);

	if (sn->temporary) {
		/*
		 * Ideally we would just call
		 * dsl_dataset_user_hold_check() and
		 * dsl_dataset_destroy_check() here.  However the
		 * dataset we want to hold and destroy is the snapshot
		 * that we just confirmed we can create, but it won't
		 * exist until after these checks are run.  Do any
		 * checks we can here and if more checks are added to
		 * those routines in the future, similar checks may be
		 * necessary here.
		 */
		if (spa_version(os->os_spa) < SPA_VERSION_USERREFS)
			return (ENOTSUP);
		/*
		 * Not checking number of tags because the tag will be
		 * unique, as it will be the only tag.
		 */
		if (strlen(sn->htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
			return (E2BIG);

		sn->ha = kmem_alloc(sizeof (struct dsl_ds_holdarg),
		    KM_PUSHPAGE);
		sn->ha->temphold = B_TRUE;
		sn->ha->htag = sn->htag;
	}
	return (error);
}
static void
snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	struct snaparg *sn = arg2;

	dsl_dataset_snapshot_sync(ds, sn->snapname, tx);

	if (sn->props) {
		dsl_props_arg_t pa;
		pa.pa_props = sn->props;
		pa.pa_source = ZPROP_SRC_LOCAL;
		dsl_props_set_sync(ds->ds_prev, &pa, tx);
	}

	if (sn->temporary) {
		struct dsl_ds_destroyarg da;

		dsl_dataset_user_hold_sync(ds->ds_prev, sn->ha, tx);
		kmem_free(sn->ha, sizeof (struct dsl_ds_holdarg));
		sn->ha = NULL;
		sn->newds = ds->ds_prev;

		da.ds = ds->ds_prev;
		da.defer = B_TRUE;
		dsl_dataset_destroy_sync(&da, FTAG, tx);
	}
}
int
dmu_objset_snapshot_one(const char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	int err;
	char *cp;

	/*
	 * If the objset starts with a '%', then ignore it unless it was
	 * explicitly named (ie, not recursive).  These hidden datasets
	 * are always inconsistent, and by not opening them here, we can
	 * avoid a race with dsl_dir_destroy_check().
	 */
	cp = strrchr(name, '/');
	if (cp && cp[1] == '%' && sn->recursive)
		return (0);

	(void) strcpy(sn->failed, name);

	/*
	 * Check permissions if we are doing a recursive snapshot.  The
	 * permission checks for the starting dataset have already been
	 * performed in zfs_secpolicy_snapshot()
	 */
	if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
		return (err);

	err = dmu_objset_hold(name, sn, &os);
	if (err != 0)
		return (err);

	/*
	 * If the objset is in an inconsistent state (eg, in the process
	 * of being destroyed), don't snapshot it.  As with %hidden
	 * datasets, we return EBUSY if this name was explicitly
	 * requested (ie, not recursive), and otherwise ignore it.
	 */
	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
		dmu_objset_rele(os, sn);
		return (sn->recursive ? 0 : EBUSY);
	}

	if (sn->needsuspend) {
		err = zil_suspend(dmu_objset_zil(os));
		if (err) {
			dmu_objset_rele(os, sn);
			return (err);
		}
	}
	dsl_sync_task_create(sn->dstg, snapshot_check, snapshot_sync,
	    os, sn, 3);

	return (0);
}
int
dmu_objset_snapshot(char *fsname, char *snapname, char *tag,
    nvlist_t *props, boolean_t recursive, boolean_t temporary, int cleanup_fd)
{
	dsl_sync_task_t *dst;
	struct snaparg *sn;
	spa_t *spa;
	minor_t minor;
	int err;

	sn = kmem_alloc(sizeof (struct snaparg), KM_SLEEP);
	(void) strcpy(sn->failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(sn, sizeof (struct snaparg));
		return (err);
	}

	if (temporary) {
		if (cleanup_fd < 0) {
			spa_close(spa, FTAG);
			return (EINVAL);
		}
		if ((err = zfs_onexit_fd_hold(cleanup_fd, &minor)) != 0) {
			spa_close(spa, FTAG);
			return (err);
		}
	}

	sn->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn->snapname = snapname;
	sn->htag = tag;
	sn->props = props;
	sn->recursive = recursive;
	sn->needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
	sn->temporary = temporary;
	sn->ha = NULL;
	sn->newds = NULL;

	if (recursive) {
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, sn, DS_FIND_CHILDREN);
	} else {
		err = dmu_objset_snapshot_one(fsname, sn);
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(sn->dstg);

	for (dst = list_head(&sn->dstg->dstg_tasks); dst;
	    dst = list_next(&sn->dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		dsl_dataset_t *ds = os->os_dsl_dataset;
		if (dst->dst_err) {
			dsl_dataset_name(ds, sn->failed);
		} else if (temporary) {
			dsl_register_onexit_hold_cleanup(sn->newds, tag, minor);
		}
		if (sn->needsuspend)
			zil_resume(dmu_objset_zil(os));
		dmu_objset_rele(os, sn);
	}

	if (err)
		(void) strcpy(fsname, sn->failed);
	if (temporary)
		zfs_onexit_fd_rele(cleanup_fd);
	dsl_sync_task_group_destroy(sn->dstg);
	spa_close(spa, FTAG);
	kmem_free(sn, sizeof (struct snaparg));
	return (err);
}
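
/*
 * Snapshot call flow, for reference: dmu_objset_snapshot() above builds a
 * dsl_sync_task_group with one task per dataset (added by
 * dmu_objset_snapshot_one()), and dsl_sync_task_group_wait() then runs
 * snapshot_check()/snapshot_sync() for each task in syncing context.
 */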
static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = list_head(list))) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}
/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	int i;
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(bp == os->os_rootbp);
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
	ASSERT(BP_GET_LEVEL(bp) == 0);

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
}
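
/*
 * Each block pointer's blk_fill counts the allocated dnodes beneath it,
 * so the sum over the meta-dnode's block pointers above yields the object
 * count for the entire objset.
 */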
/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
	    dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list)) != NULL) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}
static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}
boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}
static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}
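
/*
 * Note that the delta charged above is DNODE_SIZE + used: an object costs
 * its owner at least one dnode's worth of space even when it holds no
 * data.
 */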
void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while ((dn = list_head(list)) != NULL) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta is zero.  Otherwise
		 * the block of the zap obj could be shared between
		 * datasets but need to be different between them after
		 * a bprewrite.
		 */

		flags = dn->dn_id_flags;
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE, tx);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}
/*
 * Returns a pointer to data to find uid/gid from
 *
 * If a dirty record for transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL, *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}
int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (ENOTSUP);
	if (dmu_objset_is_snapshot(os))
		return (EINVAL);

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (EINTR);

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}
uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}
void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}
boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
	else
		return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
	return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
struct findarg {
	int (*func)(const char *, void *);
	void *arg;
};

/* ARGSUSED */
static int
findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct findarg *fa = arg;
	return (fa->func(dsname, fa->arg));
}
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * Perhaps change all callers to use dmu_objset_find_spa()?
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	struct findarg fa;

	fa.func = func;
	fa.arg = arg;
	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
}
/*
 * Find all objsets under name, call func on each
 */
int
dmu_objset_find_spa(spa_t *spa, const char *name,
    int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	if (name == NULL)
		name = spa_name(spa);
	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_close(dd, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);
	dp = dd->dd_pool;

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr->za_integer_length == sizeof (uint64_t));
			ASSERT(attr->za_num_integers == 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			err = dmu_objset_find_spa(spa, child, func, arg, flags);
			strfree(child);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		if (!dsl_pool_sync_context(dp))
			rw_enter(&dp->dp_config_rwlock, RW_READER);
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
		if (!dsl_pool_sync_context(dp))
			rw_exit(&dp->dp_config_rwlock);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT(attr->za_integer_length ==
				    sizeof (uint64_t));
				ASSERT(attr->za_num_integers == 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				err = func(spa, attr->za_first_integer,
				    child, arg);
				strfree(child);
				if (err)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_close(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	err = func(spa, thisobj, name, arg);
	return (err);
}
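
/*
 * Note the traversal order in dmu_objset_find_spa() above: child datasets
 * first (depth-first), then snapshots, then the dataset itself, so func is
 * applied to a parent only after everything beneath it.
 */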
/* ARGSUSED */
int
dmu_objset_prefetch(const char *name, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_hold(name, FTAG, &ds))
		return (0);

	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
		mutex_enter(&ds->ds_opening_lock);
		if (ds->ds_objset == NULL) {
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
			    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

			(void) arc_read(NULL, dsl_dataset_get_spa(ds),
			    &ds->ds_phys->ds_bp, NULL, NULL,
			    ZIO_PRIORITY_ASYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		mutex_exit(&ds->ds_opening_lock);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_destroy);
EXPORT_SYMBOL(dmu_objset_snapshot);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_find_spa);
EXPORT_SYMBOL(dmu_objset_prefetch);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);

EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_do_userquota_updates);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
#endif