/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfeature.h>
#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_send.h>
#include <sys/zfs_project.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to overwrite the maximum number of threads for the parallelization
 * of dmu_objset_find_dp, needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;

/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
 */
int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;
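
/*
 * For a sense of scale (assuming the common DN_MAX_INDBLKSHIFT value of
 * 17, not something stated here): the threshold above works out to
 * 1 << 17 = 131072 freed dnodes before backfilling kicks in.
 */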

static char *upgrade_tag = "upgrade_tag";

static void dmu_objset_find_dp_cb(void *arg);

static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb);
static void dmu_objset_upgrade_stop(objset_t *os);

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

uint64_t
dmu_objset_dnodesize(objset_t *os)
{
	return (os->os_dnodesize);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa, newval,
	    ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	switch (newval) {
	case ZFS_DNSIZE_LEGACY:
		os->os_dnodesize = DNODE_MIN_SIZE;
		break;
	case ZFS_DNSIZE_AUTO:
		/*
		 * Choose a dnode size that will work well for most
		 * workloads if the user specified "auto". Future code
		 * improvements could dynamically select a dnode size
		 * based on observed workload patterns.
		 */
		os->os_dnodesize = DNODE_MIN_SIZE * 2;
		break;
	case ZFS_DNSIZE_1K:
	case ZFS_DNSIZE_2K:
	case ZFS_DNSIZE_4K:
	case ZFS_DNSIZE_8K:
	case ZFS_DNSIZE_16K:
		os->os_dnodesize = newval;
		break;
	}
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
	    size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size >= OBJSET_PHYS_SIZE_V2) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
		if (size >= sizeof (objset_phys_t))
			dnode_byteswap(&osp->os_projectused_dnode);
	}
}
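
/*
 * A sketch of the layout the size checks above encode (inferred from the
 * version checks in dmu_objset_open_impl() below, not stated in the
 * original): OBJSET_PHYS_SIZE_V1 covers only the meta dnode and ZIL
 * header, OBJSET_PHYS_SIZE_V2 adds the user/group used dnodes, and the
 * full objset_phys_t additionally holds the project used dnode.
 */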

/*
 * The hash is a CRC-based hash of the objset_t pointer and the object number.
 */
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	/*
	 * The low 6 bits of the pointer don't have much entropy, because
	 * the objset_t is larger than 2^6 bytes long.
	 */
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 24);

	return (crc);
}

static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
	dnode_t *dn = obj;
	return (dnode_hash(dn->dn_objset, dn->dn_object) %
	    multilist_get_num_sublists(ml));
}
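
/*
 * Illustrative example (values assumed, not from the original source):
 * with 8 sublists, the dirty dnode for object 1000 lands on sublist
 * dnode_hash(os, 1000) % 8.  Folding the objset pointer into the hash
 * keeps two datasets that use the same object numbers from always
 * colliding on the same sublist.
 */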

static int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		int size;
		enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;

		if (ds != NULL && ds->ds_dir->dd_crypto_obj != 0) {
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			ASSERT(BP_IS_AUTHENTICATED(bp));
			zio_flags |= ZIO_FLAG_RAW;
		}

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		if (spa_version(spa) < SPA_VERSION_USERSPACE)
			size = OBJSET_PHYS_SIZE_V1;
		else if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_PROJECT_QUOTA))
			size = OBJSET_PHYS_SIZE_V2;
		else
			size = sizeof (objset_phys_t);

		/* Increase the blocksize if we are permitted. */
		if (arc_buf_size(os->os_phys_buf) < size) {
			arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
			    ARC_BUFC_METADATA, size);
			bzero(buf->b_data, size);
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1;
		os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
		    ARC_BUFC_METADATA, size);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		boolean_t needlock = B_FALSE;

		os->os_encrypted = (ds->ds_dir->dd_crypto_obj != 0);

		/*
		 * Note: it's valid to open the objset if the dataset is
		 * long-held, in which case the pool_config lock will not
		 * be held.
		 */
		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
			needlock = B_TRUE;
			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		}

		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DNODESIZE),
				    dnodesize_changed_cb, os);
			}
		}
		if (needlock)
			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (err != 0) {
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_encrypted = B_FALSE;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
		os->os_dnodesize = DNODE_MIN_SIZE;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]),
		    dnode_multilist_index_func);
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	list_link_init(&os->os_evicting_node);

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
	os->os_obj_next_percpu_len = boot_ncpus;
	os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
	    sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
		if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf))
			dnode_special_open(os,
			    &os->os_phys->os_projectused_dnode,
			    DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode);
	}

	mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL);

	*osp = os;
	return (0);
}
*ds
, objset_t
**osp
)
606 * We shouldn't be doing anything with dsl_dataset_t's unless the
607 * pool_config lock is held, or the dataset is long-held.
609 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
) ||
610 dsl_dataset_long_held(ds
));
612 mutex_enter(&ds
->ds_opening_lock
);
613 if (ds
->ds_objset
== NULL
) {
615 rrw_enter(&ds
->ds_bp_rwlock
, RW_READER
, FTAG
);
616 err
= dmu_objset_open_impl(dsl_dataset_get_spa(ds
),
617 ds
, dsl_dataset_get_blkptr(ds
), &os
);
618 rrw_exit(&ds
->ds_bp_rwlock
, FTAG
);
621 mutex_enter(&ds
->ds_lock
);
622 ASSERT(ds
->ds_objset
== NULL
);
624 mutex_exit(&ds
->ds_lock
);
627 *osp
= ds
->ds_objset
;
628 mutex_exit(&ds
->ds_opening_lock
);

/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold_flags(const char *name, boolean_t decrypt, void *tag,
    objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}

int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
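
/*
 * Usage sketch (illustrative only; "pool/fs" is a hypothetical dataset
 * name): a hold is always paired with a release using the same tag, e.g.
 *
 *	objset_t *os;
 *	if (dmu_objset_hold("pool/fs", FTAG, &os) == 0) {
 *		... read-only inspection of os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 */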

static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
	int err;

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		return (err);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		return (SET_ERROR(EROFS));
	} else if (!readonly && decrypt &&
	    dsl_dir_incompatible_encryption_version(ds->ds_dir)) {
		return (SET_ERROR(EROFS));
	}

	/* if we are decrypting, we can now check MACs in os->os_phys_buf */
	if (decrypt && arc_is_unauthenticated((*osp)->os_phys_buf)) {
		err = arc_untransform((*osp)->os_phys_buf, (*osp)->os_spa,
		    ds->ds_object, B_FALSE);
		if (err != 0)
			return (err);

		ASSERT0(arc_is_unauthenticated((*osp)->os_phys_buf));
	}

	return (0);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, flags, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}
	err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
	if (err != 0) {
		dsl_dataset_disown(ds, flags, tag);
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	/*
	 * User accounting requires the dataset to be decrypted and rw.
	 * We also don't begin user accounting during claiming to help
	 * speed up pool import times and to keep this txg reserved
	 * completely for recovery work.
	 */
	if ((dmu_objset_userobjspace_upgradable(*osp) ||
	    dmu_objset_projectquota_upgradable(*osp)) &&
	    !readonly && !dp->dp_spa->spa_claiming &&
	    (ds->ds_dir->dd_crypto_obj == 0 || decrypt))
		dmu_objset_id_quota_upgrade(*osp);

	dsl_pool_rele(dp, FTAG);
	return (0);
}
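
/*
 * Note the contrast with dmu_objset_hold(): dmu_objset_own() takes a
 * long hold (ownership) of the dataset and must be paired with
 * dmu_objset_disown() using the same tag and decrypt setting, whereas a
 * plain hold also pins the pool and is released with dmu_objset_rele().
 */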

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;

	err = dsl_dataset_own_obj(dp, obj, flags, tag, &ds);
	if (err != 0)
		return (err);

	err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
	if (err != 0) {
		dsl_dataset_disown(ds, flags, tag);
		return (err);
	}

	return (0);
}

void
dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, void *tag)
{
	ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;

	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag);
	dsl_pool_rele(dp, tag);
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dmu_objset_rele_flags(os, B_FALSE, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock to avoid intervening namespace or ownership
 * changes.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, boolean_t decrypt, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds, *newds;
	char name[ZFS_MAX_DATASET_NAME_LEN];

	ds = os->os_dsl_dataset;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = dmu_objset_pool(os);
	dsl_pool_config_enter(dp, FTAG);
	dmu_objset_disown(os, decrypt, tag);
	VERIFY0(dsl_dataset_own(dp, name,
	    (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0, tag, &newds));
	VERIFY3P(newds, ==, os->os_dsl_dataset);
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, boolean_t decrypt, void *tag)
{
	/*
	 * Stop upgrading thread
	 */
	dmu_objset_upgrade_stop(os);
	dsl_dataset_disown(os->os_dsl_dataset,
	    (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn_marker;
	dnode_t *dn;

	dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, dn_marker);
			list_remove(&os->os_dnodes, dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	kmem_free(dn_marker, sizeof (dnode_t));

	if (DMU_USERUSED_DNODE(os) != NULL) {
		if (DMU_PROJECTUSED_DNODE(os) != NULL)
			dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds)
		dsl_prop_unregister_all(ds, os);

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}

void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		if (DMU_PROJECTUSED_DNODE(os))
			dnode_special_close(&os->os_projectused_dnode);
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	kmem_free(os->os_obj_next_percpu,
	    os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_userused_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	mutex_destroy(&os->os_upgrade_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		multilist_destroy(os->os_dirty_dnodes[i]);
	}
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

objset_t *
dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, int levels, int blksz, int ibs, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (blksz == 0)
		blksz = DNODE_BLOCK_SIZE;
	if (ibs == 0)
		ibs = DN_MAX_INDBLKSHIFT;

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, blksz, ibs, DMU_OT_NONE, 0,
	    DNODE_MIN_SLOTS, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		if (levels == 0) {
			levels = 1;

			/*
			 * Determine the number of levels necessary for the
			 * meta-dnode to contain DN_MAX_OBJECT dnodes.  Note
			 * that in order to ensure that we do not overflow
			 * 64 bits, there has to be a nlevels that gives us a
			 * number of blocks > DN_MAX_OBJECT but < 2^64.
			 * Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)
			 * (10) must be less than (64 - log2(DN_MAX_OBJECT))
			 * (16).
			 */
			while ((uint64_t)mdn->dn_nblkptr <<
			    (mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) *
			    (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
			    DN_MAX_OBJECT)
				levels++;
		}

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;

	/*
	 * Enable user accounting if it is enabled and this is not an
	 * encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		if (dmu_objset_userobjused_enabled(os)) {
			ds->ds_feature_activation_needed[
			    SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE;
			os->os_phys->os_flags |=
			    OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
		}
		if (dmu_objset_projectquota_enabled(os)) {
			ds->ds_feature_activation_needed[
			    SPA_FEATURE_PROJECT_QUOTA] = B_TRUE;
			os->os_phys->os_flags |=
			    OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
		}
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}
1050 dmu_objset_create_impl(spa_t
*spa
, dsl_dataset_t
*ds
, blkptr_t
*bp
,
1051 dmu_objset_type_t type
, dmu_tx_t
*tx
)
1053 return (dmu_objset_create_impl_dnstats(spa
, ds
, bp
, type
, 0, 0, 0, tx
));
1056 typedef struct dmu_objset_create_arg
{
1057 const char *doca_name
;
1059 void (*doca_userfunc
)(objset_t
*os
, void *arg
,
1060 cred_t
*cr
, dmu_tx_t
*tx
);
1062 dmu_objset_type_t doca_type
;
1063 uint64_t doca_flags
;
1064 dsl_crypto_params_t
*doca_dcp
;
1065 } dmu_objset_create_arg_t
;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dmu_objset_create_crypt_check(pdd, doca->doca_dcp);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (error);
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);

	dsl_dir_rele(pdd, FTAG);

	return (error);
}
, dmu_tx_t
*tx
)
1108 dmu_objset_create_arg_t
*doca
= arg
;
1109 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1118 VERIFY0(dsl_dir_hold(dp
, doca
->doca_name
, FTAG
, &pdd
, &tail
));
1120 obj
= dsl_dataset_create_sync(pdd
, tail
, NULL
, doca
->doca_flags
,
1121 doca
->doca_cred
, doca
->doca_dcp
, tx
);
1123 VERIFY0(dsl_dataset_hold_obj_flags(pdd
->dd_pool
, obj
,
1124 DS_HOLD_FLAG_DECRYPT
, FTAG
, &ds
));
1125 rrw_enter(&ds
->ds_bp_rwlock
, RW_READER
, FTAG
);
1126 bp
= dsl_dataset_get_blkptr(ds
);
1127 os
= dmu_objset_create_impl(pdd
->dd_pool
->dp_spa
,
1128 ds
, bp
, doca
->doca_type
, tx
);
1129 rrw_exit(&ds
->ds_bp_rwlock
, FTAG
);
1131 if (doca
->doca_userfunc
!= NULL
) {
1132 doca
->doca_userfunc(os
, doca
->doca_userarg
,
1133 doca
->doca_cred
, tx
);
1137 * The doca_userfunc() may write out some data that needs to be
1138 * encrypted if the dataset is encrypted (specifically the root
1139 * directory). This data must be written out before the encryption
1140 * key mapping is removed by dsl_dataset_rele_flags(). Force the
1141 * I/O to occur immediately by invoking the relevant sections of
1144 if (os
->os_encrypted
) {
1145 dsl_dataset_t
*tmpds
= NULL
;
1146 boolean_t need_sync_done
= B_FALSE
;
1148 mutex_enter(&ds
->ds_lock
);
1149 ds
->ds_owner
= FTAG
;
1150 mutex_exit(&ds
->ds_lock
);
1152 rzio
= zio_root(dp
->dp_spa
, NULL
, NULL
, ZIO_FLAG_MUSTSUCCEED
);
1153 tmpds
= txg_list_remove_this(&dp
->dp_dirty_datasets
, ds
,
1155 if (tmpds
!= NULL
) {
1156 dsl_dataset_sync(ds
, rzio
, tx
);
1157 need_sync_done
= B_TRUE
;
1159 VERIFY0(zio_wait(rzio
));
1161 dmu_objset_do_userquota_updates(os
, tx
);
1162 taskq_wait(dp
->dp_sync_taskq
);
1164 rzio
= zio_root(dp
->dp_spa
, NULL
, NULL
, ZIO_FLAG_MUSTSUCCEED
);
1165 tmpds
= txg_list_remove_this(&dp
->dp_dirty_datasets
, ds
,
1167 if (tmpds
!= NULL
) {
1168 dmu_buf_rele(ds
->ds_dbuf
, ds
);
1169 dsl_dataset_sync(ds
, rzio
, tx
);
1171 VERIFY0(zio_wait(rzio
));
1174 dsl_dataset_sync_done(ds
, tx
);
1176 mutex_enter(&ds
->ds_lock
);
1177 ds
->ds_owner
= NULL
;
1178 mutex_exit(&ds
->ds_lock
);
1181 spa_history_log_internal_ds(ds
, "create", tx
, "");
1182 zvol_create_minors(dp
->dp_spa
, doca
->doca_name
, B_TRUE
);
1184 dsl_dataset_rele_flags(ds
, DS_HOLD_FLAG_DECRYPT
, FTAG
);
1185 dsl_dir_rele(pdd
, FTAG
);

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    dsl_crypto_params_t *dcp, dmu_objset_create_sync_func_t func, void *arg)
{
	dmu_objset_create_arg_t doca;
	dsl_crypto_params_t tmp_dcp = { 0 };

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	/*
	 * Some callers (mostly for testing) do not provide a dcp on their
	 * own but various code inside the sync task will require it to be
	 * allocated.  Rather than adding NULL checks throughout this code
	 * or adding dummy dcp's to all of the callers we simply create a
	 * dummy one here and use that.  This zero dcp will have the same
	 * effect as asking for inheritance of all encryption params.
	 */
	doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca,
	    6, ZFS_SPACE_CHECK_NORMAL));
}

typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (error);
	}

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dmu_objset_clone_crypt_check(pdd, origin->ds_dir);
	if (error != 0) {
		dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(pdd, FTAG);
		return (error);
	}

	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, NULL, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	zvol_create_minors(dp->dp_spa, doca->doca_clone, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
	    6, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}

static void
dmu_objset_upgrade_task_cb(void *data)
{
	objset_t *os = data;

	mutex_enter(&os->os_upgrade_lock);
	os->os_upgrade_status = EINTR;
	if (!os->os_upgrade_exit) {
		mutex_exit(&os->os_upgrade_lock);

		os->os_upgrade_status = os->os_upgrade_cb(os);
		mutex_enter(&os->os_upgrade_lock);
	}

	os->os_upgrade_exit = B_TRUE;
	os->os_upgrade_id = 0;
	mutex_exit(&os->os_upgrade_lock);
	dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}

static void
dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb)
{
	if (os->os_upgrade_id != 0)
		return;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
	dsl_dataset_long_hold(dmu_objset_ds(os), upgrade_tag);

	mutex_enter(&os->os_upgrade_lock);
	if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) {
		os->os_upgrade_exit = B_FALSE;
		os->os_upgrade_cb = cb;
		os->os_upgrade_id = taskq_dispatch(
		    os->os_spa->spa_upgrade_taskq,
		    dmu_objset_upgrade_task_cb, os, TQ_SLEEP);
		if (os->os_upgrade_id == TASKQID_INVALID) {
			dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
			os->os_upgrade_status = ENOMEM;
		}
	}
	mutex_exit(&os->os_upgrade_lock);
}

static void
dmu_objset_upgrade_stop(objset_t *os)
{
	mutex_enter(&os->os_upgrade_lock);
	os->os_upgrade_exit = B_TRUE;
	if (os->os_upgrade_id != 0) {
		taskqid_t id = os->os_upgrade_id;

		os->os_upgrade_id = 0;
		mutex_exit(&os->os_upgrade_lock);

		if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) {
			dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
		}
		txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
	} else {
		mutex_exit(&os->os_upgrade_lock);
	}
}

static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = multilist_sublist_head(list)) != NULL) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		multilist_sublist_remove(list, dn);

		multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
		if (newlist != NULL) {
			(void) dnode_add_ref(dn, newlist);
			multilist_insert(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
	uint64_t fill = 0;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group/project accounting objects).
	 */
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		fill += BP_GET_FILL(&dnp->dn_blkptr[i]);

	BP_SET_FILL(bp, fill);

	if (os->os_dsl_dataset != NULL)
		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
	*os->os_rootbp = *bp;
	if (os->os_dsl_dataset != NULL)
		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
	kmem_free(bp, sizeof (*bp));
}

typedef struct sync_dnodes_arg {
	multilist_t *sda_list;
	int sda_sublist_idx;
	multilist_t *sda_newlist;
	dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;

static void
sync_dnodes_task(void *arg)
{
	sync_dnodes_arg_t *sda = arg;

	multilist_sublist_t *ms =
	    multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);

	dmu_objset_sync_dnodes(ms, sda->sda_tx);

	multilist_sublist_unlock(ms);

	kmem_free(sda, sizeof (*sda));
}
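
/*
 * The sync path below fans dirty dnodes out across dp_sync_taskq: one
 * sync_dnodes_task per sublist of os_dirty_dnodes[txgoff], each locking
 * only its own sublist, with dmu_objset_sync() waiting on the taskq
 * before issuing the remaining dirty-record I/Os.
 */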

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;
	blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
	*blkptr_copy = *os->os_rootbp;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	/*
	 * If we are either claiming the ZIL or doing a raw receive, write
	 * out the os_phys_buf raw.  Neither of these actions will affect
	 * the MAC at this point.
	 */
	if (os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
		ASSERT(os->os_encrypted);
		os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
		arc_convert_to_raw(os->os_phys_buf,
		    os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER,
		    DMU_OT_OBJSET, NULL, NULL, NULL);
	}

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
	    os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	if (DMU_PROJECTUSED_DNODE(os) &&
	    DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_PROJECTUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_PROJECTUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.  But it may already
		 * exist because we call dsl_dataset_sync() twice per txg.
		 */
		if (os->os_synced_dnodes == NULL) {
			os->os_synced_dnodes =
			    multilist_create(sizeof (dnode_t),
			    offsetof(dnode_t, dn_dirty_link[txgoff]),
			    dnode_multilist_index_func);
		} else {
			ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
			    offsetof(dnode_t, dn_dirty_link[txgoff]));
		}
	}

	for (int i = 0;
	    i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) {
		sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
		sda->sda_list = os->os_dirty_dnodes[txgoff];
		sda->sda_sublist_idx = i;
		sda->sda_tx = tx;
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    sync_dnodes_task, sda, 0);
		/* callback frees sda */
	}
	taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list)) != NULL) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}

	/* Enable dnode backfill if enough objects have been freed. */
	if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
		os->os_rescan_dnodes = B_TRUE;
		os->os_freed_dnodes = 0;
	}

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

boolean_t
dmu_objset_userobjused_enabled(objset_t *os)
{
	return (dmu_objset_userused_enabled(os) &&
	    spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}

boolean_t
dmu_objset_projectquota_enabled(objset_t *os)
{
	return (used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_PROJECTUSED_DNODE(os) != NULL &&
	    spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
}

typedef struct userquota_node {
	/* must be the first field, see userquota_update_cache() */
	char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
	int64_t uqn_delta;
	avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
	avl_tree_t uqc_user_deltas;
	avl_tree_t uqc_group_deltas;
	avl_tree_t uqc_project_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
	const userquota_node_t *luqn = l;
	const userquota_node_t *ruqn = r;
	int rv;

	/*
	 * NB: can only access uqn_id because userquota_update_cache() doesn't
	 * pass in an entire userquota_node_t.
	 */
	rv = strcmp(luqn->uqn_id, ruqn->uqn_id);

	return (AVL_ISIGN(rv));
}

static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
	void *cookie;
	userquota_node_t *uqn;

	ASSERT(dmu_tx_is_syncing(tx));

	cookie = NULL;
	while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
	    &cookie)) != NULL) {
		/*
		 * os_userused_lock protects against concurrent calls to
		 * zap_increment_int().  It's needed because zap_increment_int()
		 * is not thread-safe (i.e. not atomic).
		 */
		mutex_enter(&os->os_userused_lock);
		VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT,
		    uqn->uqn_id, uqn->uqn_delta, tx));
		mutex_exit(&os->os_userused_lock);
		kmem_free(uqn, sizeof (*uqn));
	}
	avl_destroy(&cache->uqc_user_deltas);

	cookie = NULL;
	while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
	    &cookie)) != NULL) {
		mutex_enter(&os->os_userused_lock);
		VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT,
		    uqn->uqn_id, uqn->uqn_delta, tx));
		mutex_exit(&os->os_userused_lock);
		kmem_free(uqn, sizeof (*uqn));
	}
	avl_destroy(&cache->uqc_group_deltas);

	if (dmu_objset_projectquota_enabled(os)) {
		cookie = NULL;
		while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas,
		    &cookie)) != NULL) {
			mutex_enter(&os->os_userused_lock);
			VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT,
			    uqn->uqn_id, uqn->uqn_delta, tx));
			mutex_exit(&os->os_userused_lock);
			kmem_free(uqn, sizeof (*uqn));
		}
		avl_destroy(&cache->uqc_project_deltas);
	}
}

static void
userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta)
{
	userquota_node_t *uqn;
	avl_index_t idx;

	ASSERT(strlen(id) < sizeof (uqn->uqn_id));
	/*
	 * Use id directly for searching because uqn_id is the first field of
	 * userquota_node_t and fields after uqn_id won't be accessed in
	 * avl_find().
	 */
	uqn = avl_find(avl, (const void *)id, &idx);
	if (uqn == NULL) {
		uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
		strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id));
		avl_insert(avl, uqn, idx);
	}
	uqn->uqn_delta += delta;
}
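
/*
 * Example of the cache in action (illustrative values): freeing an
 * object accounted to uid 1000 and creating one for uid 1001 in the same
 * txg yields entries keyed by the hex id strings "3e8" and "3e9"; their
 * deltas are accumulated here and applied to the ZAP once, in
 * do_userquota_cacheflush(), rather than once per dnode.
 */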

static void
do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
    uint64_t flags, uint64_t user, uint64_t group, uint64_t project,
    boolean_t subtract)
{
	if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) {
		int64_t delta = DNODE_MIN_SIZE + used;
		char name[20];

		if (subtract)
			delta = -delta;

		(void) sprintf(name, "%llx", (longlong_t)user);
		userquota_update_cache(&cache->uqc_user_deltas, name, delta);

		(void) sprintf(name, "%llx", (longlong_t)group);
		userquota_update_cache(&cache->uqc_group_deltas, name, delta);

		if (dmu_objset_projectquota_enabled(os)) {
			(void) sprintf(name, "%llx", (longlong_t)project);
			userquota_update_cache(&cache->uqc_project_deltas,
			    name, delta);
		}
	}
}
*os
, userquota_cache_t
*cache
, uint64_t flags
,
1797 uint64_t user
, uint64_t group
, uint64_t project
, boolean_t subtract
)
1799 if (flags
& DNODE_FLAG_USEROBJUSED_ACCOUNTED
) {
1800 char name
[20 + DMU_OBJACCT_PREFIX_LEN
];
1801 int delta
= subtract
? -1 : 1;
1803 (void) snprintf(name
, sizeof (name
), DMU_OBJACCT_PREFIX
"%llx",
1805 userquota_update_cache(&cache
->uqc_user_deltas
, name
, delta
);
1807 (void) snprintf(name
, sizeof (name
), DMU_OBJACCT_PREFIX
"%llx",
1809 userquota_update_cache(&cache
->uqc_group_deltas
, name
, delta
);
1811 if (dmu_objset_projectquota_enabled(os
)) {
1812 (void) snprintf(name
, sizeof (name
),
1813 DMU_OBJACCT_PREFIX
"%llx", (longlong_t
)project
);
1814 userquota_update_cache(&cache
->uqc_project_deltas
,
1820 typedef struct userquota_updates_arg
{
1822 int uua_sublist_idx
;
1824 } userquota_updates_arg_t
;

static void
userquota_updates_task(void *arg)
{
	userquota_updates_arg_t *uua = arg;
	objset_t *os = uua->uua_os;
	dmu_tx_t *tx = uua->uua_tx;
	dnode_t *dn;
	userquota_cache_t cache = { { 0 } };

	multilist_sublist_t *list =
	    multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);

	ASSERT(multilist_sublist_head(list) == NULL ||
	    dmu_objset_userused_enabled(os));
	avl_create(&cache.uqc_user_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
	avl_create(&cache.uqc_group_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
	if (dmu_objset_projectquota_enabled(os))
		avl_create(&cache.uqc_project_deltas, userquota_compare,
		    sizeof (userquota_node_t), offsetof(userquota_node_t,
		    uqn_node));

	while ((dn = multilist_sublist_head(list)) != NULL) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST)  {
			do_userquota_update(os, &cache, dn->dn_oldused,
			    dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid,
			    dn->dn_oldprojid, B_TRUE);
			do_userobjquota_update(os, &cache, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid,
			    dn->dn_oldprojid, B_TRUE);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, &cache,
			    DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags,
			    dn->dn_newuid, dn->dn_newgid,
			    dn->dn_newprojid, B_FALSE);
			do_userobjquota_update(os, &cache,
			    dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid,
			    dn->dn_newprojid, B_FALSE);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_oldprojid = dn->dn_newprojid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		multilist_sublist_remove(list, dn);
		dnode_rele(dn, os->os_synced_dnodes);
	}
	do_userquota_cacheflush(os, &cache, tx);
	multilist_sublist_unlock(list);
	kmem_free(uua, sizeof (*uua));
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	if (!dmu_objset_userused_enabled(os))
		return;

	/*
	 * If this is a raw receive just return and handle accounting
	 * later when we have the keys loaded.  We also don't do user
	 * accounting during claiming since the datasets are not owned
	 * for the duration of claiming and this txg should only be
	 * used for recovery.
	 */
	if (os->os_encrypted && dmu_objset_is_receiving(os))
		return;

	if (tx->tx_txg <= os->os_spa->spa_claim_max_txg)
		return;

	/* Allocate the user/group/project used objects if necessary. */
	if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
		VERIFY0(zap_create_claim(os,
		    DMU_USERUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		VERIFY0(zap_create_claim(os,
		    DMU_GROUPUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
	}

	if (dmu_objset_projectquota_enabled(os) &&
	    DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
		VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
	}

	for (int i = 0;
	    i < multilist_get_num_sublists(os->os_synced_dnodes); i++) {
		userquota_updates_arg_t *uua =
		    kmem_alloc(sizeof (*uua), KM_SLEEP);
		uua->uua_os = os;
		uua->uua_sublist_idx = i;
		uua->uua_tx = tx;
		/* note: caller does taskq_wait() */
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    userquota_updates_task, uua, 0);
		/* callback frees uua */
	}
}

/*
 * Returns a pointer to data to find uid/gid from
 *
 * If a dirty record for transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}
*dn
, boolean_t before
, dmu_tx_t
*tx
)
1992 objset_t
*os
= dn
->dn_objset
;
1994 dmu_buf_impl_t
*db
= NULL
;
1995 uint64_t *user
= NULL
;
1996 uint64_t *group
= NULL
;
1997 uint64_t *project
= NULL
;
1998 int flags
= dn
->dn_id_flags
;
2000 boolean_t have_spill
= B_FALSE
;
2002 if (!dmu_objset_userused_enabled(dn
->dn_objset
))
2006 * Raw receives introduce a problem with user accounting. Raw
2007 * receives cannot update the user accounting info because the
2008 * user ids and the sizes are encrypted. To guarantee that we
2009 * never end up with bad user accounting, we simply disable it
2010 * during raw receives. We also disable this for normal receives
2011 * so that an incremental raw receive may be done on top of an
2012 * existing non-raw receive.
2014 if (os
->os_encrypted
&& dmu_objset_is_receiving(os
))
2017 if (before
&& (flags
& (DN_ID_CHKED_BONUS
|DN_ID_OLD_EXIST
|
2018 DN_ID_CHKED_SPILL
)))
2021 if (before
&& dn
->dn_bonuslen
!= 0)
2022 data
= DN_BONUS(dn
->dn_phys
);
2023 else if (!before
&& dn
->dn_bonuslen
!= 0) {
2026 mutex_enter(&db
->db_mtx
);
2027 data
= dmu_objset_userquota_find_data(db
, tx
);
2029 data
= DN_BONUS(dn
->dn_phys
);
2031 } else if (dn
->dn_bonuslen
== 0 && dn
->dn_bonustype
== DMU_OT_SA
) {
2034 if (RW_WRITE_HELD(&dn
->dn_struct_rwlock
))
2035 rf
|= DB_RF_HAVESTRUCT
;
2036 error
= dmu_spill_hold_by_dnode(dn
,
2037 rf
| DB_RF_MUST_SUCCEED
,
2038 FTAG
, (dmu_buf_t
**)&db
);
2040 mutex_enter(&db
->db_mtx
);
2041 data
= (before
) ? db
->db
.db_data
:
2042 dmu_objset_userquota_find_data(db
, tx
);
2043 have_spill
= B_TRUE
;
2045 mutex_enter(&dn
->dn_mtx
);
2046 dn
->dn_id_flags
|= DN_ID_CHKED_BONUS
;
2047 mutex_exit(&dn
->dn_mtx
);
2053 user
= &dn
->dn_olduid
;
2054 group
= &dn
->dn_oldgid
;
2055 project
= &dn
->dn_oldprojid
;
2057 user
= &dn
->dn_newuid
;
2058 group
= &dn
->dn_newgid
;
2059 project
= &dn
->dn_newprojid
;
2063 * Must always call the callback in case the object
2064 * type has changed and that type isn't an object type to track
2066 error
= used_cbs
[os
->os_phys
->os_type
](dn
->dn_bonustype
, data
,
2067 user
, group
, project
);
2070 * Preserve existing uid/gid when the callback can't determine
2071 * what the new uid/gid are and the callback returned EEXIST.
2072 * The EEXIST error tells us to just use the existing uid/gid.
2073 * If we don't know what the old values are then just assign
2074 * them to 0, since that is a new file being created.
2076 if (!before
&& data
== NULL
&& error
== EEXIST
) {
2077 if (flags
& DN_ID_OLD_EXIST
) {
2078 dn
->dn_newuid
= dn
->dn_olduid
;
2079 dn
->dn_newgid
= dn
->dn_oldgid
;
2080 dn
->dn_newgid
= dn
->dn_oldprojid
;
2084 dn
->dn_newprojid
= ZFS_DEFAULT_PROJID
;
2090 mutex_exit(&db
->db_mtx
);
2092 mutex_enter(&dn
->dn_mtx
);
2093 if (error
== 0 && before
)
2094 dn
->dn_id_flags
|= DN_ID_OLD_EXIST
;
2095 if (error
== 0 && !before
)
2096 dn
->dn_id_flags
|= DN_ID_NEW_EXIST
;
2099 dn
->dn_id_flags
|= DN_ID_CHKED_SPILL
;
2101 dn
->dn_id_flags
|= DN_ID_CHKED_BONUS
;
2103 mutex_exit(&dn
->dn_mtx
);
2105 dmu_buf_rele((dmu_buf_t
*)db
, FTAG
);

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

boolean_t
dmu_objset_userobjspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}

boolean_t
dmu_objset_projectquota_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
}

static int
dmu_objset_space_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		mutex_enter(&os->os_upgrade_lock);
		if (os->os_upgrade_exit)
			err = SET_ERROR(EINTR);
		mutex_exit(&os->os_upgrade_lock);
		if (err != 0)
			return (err);

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_buf_rele(db, FTAG);
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}
	return (0);
}
*os
)
2181 if (dmu_objset_userspace_present(os
))
2183 if (dmu_objset_is_snapshot(os
))
2184 return (SET_ERROR(EINVAL
));
2185 if (!dmu_objset_userused_enabled(os
))
2186 return (SET_ERROR(ENOTSUP
));
2188 err
= dmu_objset_space_upgrade(os
);
2192 os
->os_flags
|= OBJSET_FLAG_USERACCOUNTING_COMPLETE
;
2193 txg_wait_synced(dmu_objset_pool(os
), 0);
static int
dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
	int err = 0;

	if (dmu_objset_userobjspace_present(os) &&
	    dmu_objset_projectquota_present(os))
		return (0);
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));
	if (!dmu_objset_userobjused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (!dmu_objset_projectquota_enabled(os) &&
	    dmu_objset_userobjspace_present(os))
		return (SET_ERROR(ENOTSUP));

	dmu_objset_ds(os)->ds_feature_activation_needed[
	    SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE;
	if (dmu_objset_projectquota_enabled(os))
		dmu_objset_ds(os)->ds_feature_activation_needed[
		    SPA_FEATURE_PROJECT_QUOTA] = B_TRUE;

	err = dmu_objset_space_upgrade(os);
	if (err)
		return (err);

	os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
	if (dmu_objset_projectquota_enabled(os))
		os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_id_quota_upgrade(objset_t *os)
{
	dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}

boolean_t
dmu_objset_userobjspace_upgradable(objset_t *os)
{
	return (dmu_objset_type(os) == DMU_OST_ZFS &&
	    !dmu_objset_is_snapshot(os) &&
	    dmu_objset_userobjused_enabled(os) &&
	    !dmu_objset_userobjspace_present(os));
}

boolean_t
dmu_objset_projectquota_upgradable(objset_t *os)
{
	return (dmu_objset_type(os) == DMU_OST_ZFS &&
	    !dmu_objset_is_snapshot(os) &&
	    dmu_objset_projectquota_enabled(os) &&
	    !dmu_objset_projectquota_present(os));
}

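/*
 * Illustrative sketch (not part of the original file): a consumer such as
 * the ZPL would typically gate the asynchronous upgrade on the predicates
 * above, e.g.:
 *
 *	if (dmu_objset_userobjspace_upgradable(os) ||
 *	    dmu_objset_projectquota_upgradable(os))
 *		dmu_objset_id_quota_upgrade(os);
 */
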
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_NORMALIZE, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

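/*
 * Illustrative sketch (not part of the original file): callers iterate by
 * feeding the serialized cursor back in via *offp, stopping when ENOENT is
 * returned at the end of the ZAP.  Assumes the caller holds the pool config
 * lock, as asserted above; dmu_dir_list_next() below follows the same
 * serialized-cursor contract.
 *
 *	uint64_t id, off = 0;
 *	boolean_t conflict;
 *	char sname[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	while (dmu_snapshot_list_next(os, sizeof (sname), sname,
 *	    &id, &off, &conflict) == 0) {
 *		(process sname / id here)
 *	}
 */
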
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
	return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

typedef struct dmu_objset_find_ctx {
	taskq_t		*dc_tq;
	dsl_pool_t	*dc_dp;
	uint64_t	dc_ddobj;
	char		*dc_ddname; /* last component of ddobj's name */
	int		(*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
	void		*dc_arg;
	int		dc_flags;
	kmutex_t	*dc_error_lock;
	int		*dc_error;
} dmu_objset_find_ctx_t;

static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	/*
	 * Note: passing the name (dc_ddname) here is optional, but it
	 * improves performance because we don't need to call
	 * zap_value_search() to determine the name.
	 */
	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
	if (err != 0)
		goto out;

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (dcp->dc_flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			dmu_objset_find_ctx_t *child_dcp =
			    kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
			*child_dcp = *dcp;
			child_dcp->dc_ddobj = attr->za_first_integer;
			child_dcp->dc_ddname = spa_strdup(attr->za_name);
			if (dcp->dc_tq != NULL)
				(void) taskq_dispatch(dcp->dc_tq,
				    dmu_objset_find_dp_cb, child_dcp,
				    TQ_SLEEP);
			else
				dmu_objset_find_dp_impl(child_dcp);
		}
		zap_cursor_fini(&zc);
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = dcp->dc_func(dp, ds, dcp->dc_arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0) {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

	/*
	 * Note: we hold the dir while calling dsl_dataset_hold_obj() so
	 * that the dir will remain cached, and we won't have to re-instantiate
	 * it (which could be expensive due to finding its name via
	 * zap_value_search()).
	 */
	dsl_dir_rele(dd, FTAG);
	if (err != 0)
		goto out;

	err = dcp->dc_func(dp, ds, dcp->dc_arg);
	dsl_dataset_rele(ds, FTAG);

out:
	mutex_enter(dcp->dc_error_lock);
	/* only keep first error */
	if (*dcp->dc_error == 0)
		*dcp->dc_error = err;
	mutex_exit(dcp->dc_error_lock);

	if (dcp->dc_ddname != NULL)
		spa_strfree(dcp->dc_ddname);
	kmem_free(dcp, sizeof (*dcp));
}

static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * asserts(pool_config_held) down the stack. Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer. This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	int error = 0;
	taskq_t *tq = NULL;
	int ntasks;
	dmu_objset_find_ctx_t *dcp;
	kmutex_t err_lock;

	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
	dcp->dc_tq = NULL;
	dcp->dc_dp = dp;
	dcp->dc_ddobj = ddobj;
	dcp->dc_ddname = NULL;
	dcp->dc_func = func;
	dcp->dc_arg = arg;
	dcp->dc_flags = flags;
	dcp->dc_error_lock = &err_lock;
	dcp->dc_error = &error;

	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * In case a write lock is held we can't make use of
		 * parallelism, as down the stack of the worker threads
		 * the lock is asserted via dsl_pool_config_held.
		 * In case of a read lock this is solved by getting a read
		 * lock in each worker thread, which isn't possible in case
		 * of a writer lock. So we fall back to the synchronous path
		 * here.
		 * In the future it might be possible to get some magic into
		 * dsl_pool_config_held in a way that it returns true for
		 * the worker threads so that a single lock held from this
		 * thread suffices. For now, stay single threaded.
		 */
		dmu_objset_find_dp_impl(dcp);
		mutex_destroy(&err_lock);

		return (error);
	}

	ntasks = dmu_find_threads;
	if (ntasks == 0)
		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
	tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks,
	    INT_MAX, 0);
	if (tq == NULL) {
		kmem_free(dcp, sizeof (*dcp));
		mutex_destroy(&err_lock);

		return (SET_ERROR(ENOMEM));
	}
	dcp->dc_tq = tq;

	/* dcp will be freed by task */
	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active. As
	 * we always queue new tasks from within other tasks, taskq_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
	taskq_wait(tq);
	taskq_destroy(tq);
	mutex_destroy(&err_lock);

	return (error);
}

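/*
 * Illustrative sketch (count_cb is hypothetical, not part of this file):
 * visit every dataset under a dsl_dir, including snapshots. The callback
 * runs with the pool config lock held, and a nonzero return is recorded as
 * the walk's first error.
 *
 *	static int
 *	count_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint64_t n = 0;
 *	error = dmu_objset_find_dp(dp, ddobj, count_cb, &n,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */
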
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

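/*
 * Illustrative sketch (print_cb is hypothetical, not part of this file):
 * unlike dmu_objset_find_dp(), the name-based walker hands the callback each
 * dataset name rather than a held dataset.
 *
 *	static int
 *	print_cb(const char *name, void *arg)
 *	{
 *		zfs_dbgmsg("visiting %s", name);
 *		return (0);
 *	}
 *
 *	error = dmu_objset_find("tank", print_cb, NULL, DS_FIND_CHILDREN);
 */
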
boolean_t
dmu_objset_incompatible_encryption_version(objset_t *os)
{
	return (dsl_dir_incompatible_encryption_version(
	    os->os_dsl_dataset->ds_dir));
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

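/*
 * Illustrative sketch: consumers such as the ZPL stash their per-objset
 * state here, taking os_user_ptr_lock around the access as the ASSERTs
 * above require (my_state is hypothetical):
 *
 *	mutex_enter(&os->os_user_ptr_lock);
 *	dmu_objset_set_user(os, my_state);
 *	mutex_exit(&os->os_user_ptr_lock);
 */
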
/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}

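/*
 * For example, snapname "tank/home@monday" yields "tank/home" in buf;
 * a name without an '@' fails with EINVAL.
 */
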
/*
 * Call when we think we're going to write/free space in open context to track
 * the amount of dirty data in the open txg, which is also the amount
 * of memory that can not be evicted until this txg syncs.
 */
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);

	if (ds != NULL) {
		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
		dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
	}
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_hold_flags);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_rele_flags);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);
EXPORT_SYMBOL(dmu_objset_dnodesize);

EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl_dnstats);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_do_userquota_updates);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
EXPORT_SYMBOL(dmu_objset_projectquota_present);
EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif