/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/sa_impl.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/mkdev.h>
#include <sys/modctl.h>
#include <sys/refstr.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/bootconf.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include "zfs_comutil.h"
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
    zfs_sb_t *zsb = sb->s_fs_info;

    /*
     * Data integrity is job one.  We don't want a compromised kernel
     * writing to the storage pool, so we never sync during panic.
     */
    if (unlikely(oops_in_progress))
        return (0);

    /*
     * Semantically, the only requirement is that the sync be initiated.
     * The DMU syncs out txgs frequently, so there's nothing to do.
     */
    if (!wait)
        return (0);

    if (zsb != NULL) {
        /*
         * Sync a specific filesystem.
         */
        dsl_pool_t *dp;

        dp = dmu_objset_pool(zsb->z_os);

        /*
         * If the system is shutting down, then skip any
         * filesystems which may exist on a suspended pool.
         */
        if (spa_suspended(dp->dp_spa)) {
            return (0);
        }

        if (zsb->z_log != NULL)
            zil_commit(zsb->z_log, 0);
    } else {
        /*
         * Sync all ZFS filesystems.  This is what happens when you
         * run sync(1M).  Unlike other filesystems, ZFS honors the
         * request by waiting for all pools to commit all dirty data.
         */
        (void) spa_sync_allpools();
    }

    return (0);
}
EXPORT_SYMBOL(zfs_sync);
zfs_is_readonly(zfs_sb_t *zsb)
{
    return (!!(zsb->z_sb->s_flags & MS_RDONLY));
}
EXPORT_SYMBOL(zfs_is_readonly);
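/*
 * Dataset property change callbacks.  Each *_changed_cb() below is
 * registered with the DSL in zfs_register_callbacks() and is invoked with
 * the property's new value whenever the corresponding dataset property
 * changes, keeping the cached state in the zfs_sb_t in sync.
 */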
atime_changed_cb(void *arg, uint64_t newval)
{
    ((zfs_sb_t *)arg)->z_atime = newval;
}
xattr_changed_cb(void *arg, uint64_t newval)
{
    zfs_sb_t *zsb = arg;

    if (newval == ZFS_XATTR_OFF) {
        zsb->z_flags &= ~ZSB_XATTR;
    } else {
        zsb->z_flags |= ZSB_XATTR;

        if (newval == ZFS_XATTR_SA)
            zsb->z_xattr_sa = B_TRUE;
        else
            zsb->z_xattr_sa = B_FALSE;
    }
}
blksz_changed_cb(void *arg, uint64_t newval)
{
    zfs_sb_t *zsb = arg;

    if (newval < SPA_MINBLOCKSIZE ||
        newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
        newval = SPA_MAXBLOCKSIZE;

    zsb->z_max_blksz = newval;
}
readonly_changed_cb(void *arg, uint64_t newval)
{
    zfs_sb_t *zsb = arg;
    struct super_block *sb = zsb->z_sb;

    if (newval)
        sb->s_flags |= MS_RDONLY;
    else
        sb->s_flags &= ~MS_RDONLY;
}
devices_changed_cb(void *arg, uint64_t newval)
{
}

setuid_changed_cb(void *arg, uint64_t newval)
{
}

exec_changed_cb(void *arg, uint64_t newval)
{
}
nbmand_changed_cb(void *arg, uint64_t newval)
{
    zfs_sb_t *zsb = arg;
    struct super_block *sb = zsb->z_sb;

    if (newval)
        sb->s_flags |= MS_MANDLOCK;
    else
        sb->s_flags &= ~MS_MANDLOCK;
}
snapdir_changed_cb(void *arg, uint64_t newval)
{
    ((zfs_sb_t *)arg)->z_show_ctldir = newval;
}
vscan_changed_cb(void *arg, uint64_t newval)
{
    ((zfs_sb_t *)arg)->z_vscan = newval;
}
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
    ((zfs_sb_t *)arg)->z_acl_inherit = newval;
}
zfs_register_callbacks(zfs_sb_t *zsb)
{
    struct dsl_dataset *ds = NULL;
    objset_t *os = zsb->z_os;
    boolean_t do_readonly = B_FALSE;
    int error = 0;

    if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os)))
        do_readonly = B_TRUE;

    /*
     * Register property callbacks.
     *
     * It would probably be fine to just check for i/o error from
     * the first prop_register(), but I guess I like to go
     * overboard ...
     */
    ds = dmu_objset_ds(os);
    error = dsl_prop_register(ds,
        "atime", atime_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "xattr", xattr_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "recordsize", blksz_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "readonly", readonly_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "devices", devices_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "setuid", setuid_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "exec", exec_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "snapdir", snapdir_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "aclinherit", acl_inherit_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "vscan", vscan_changed_cb, zsb);
    error = error ? error : dsl_prop_register(ds,
        "nbmand", nbmand_changed_cb, zsb);
    if (error)
        goto unregister;

    if (do_readonly)
        readonly_changed_cb(zsb, B_TRUE);

    return (0);

unregister:
    /*
     * We may attempt to unregister some callbacks that are not
     * registered, but this is OK; it will simply return ENOMSG,
     * which we will ignore.
     */
    (void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
        zsb);
    (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zsb);
    (void) dsl_prop_unregister(ds, "nbmand", nbmand_changed_cb, zsb);

    return (error);
}
EXPORT_SYMBOL(zfs_register_callbacks);
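/*
 * zfs_space_delta_cb() is registered with the DMU via
 * dmu_objset_register_type(DMU_OST_ZFS, ...) and is consulted when an
 * object's bonus buffer changes, so the DMU knows which user and group
 * IDs to charge the resulting space accounting delta to.
 */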
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
    /*
     * Is it a valid type of object to track?
     */
    if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
        return (ENOENT);

    /*
     * If we have a NULL data pointer
     * then assume the id's aren't changing and
     * return EEXIST to the dmu to let it know to
     * use the same ids
     */
    if (data == NULL)
        return (EEXIST);

    if (bonustype == DMU_OT_ZNODE) {
        znode_phys_t *znp = data;
        *userp = znp->zp_uid;
        *groupp = znp->zp_gid;
    } else {
        int hdrsize;
        sa_hdr_phys_t *sap = data;
        sa_hdr_phys_t sa = *sap;
        boolean_t swap = B_FALSE;

        ASSERT(bonustype == DMU_OT_SA);

        if (sa.sa_magic == 0) {
            /*
             * This should only happen for newly created
             * files that haven't had the znode data filled
             * in yet.
             */
            *userp = 0;
            *groupp = 0;
            return (0);
        }
        if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
            sa.sa_magic = SA_MAGIC;
            sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
            swap = B_TRUE;
        } else {
            VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
        }

        hdrsize = sa_hdrsize(&sa);
        VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
        *userp = *((uint64_t *)((uintptr_t)data + hdrsize +
            SA_UID_OFFSET));
        *groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
            SA_GID_OFFSET));
        if (swap) {
            *userp = BSWAP_64(*userp);
            *groupp = BSWAP_64(*groupp);
        }
    }
    return (0);
}
fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
    uint64_t fuid;
    const char *domain;

    fuid = strtonum(fuidstr, NULL);

    domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid));
    if (domain)
        (void) strlcpy(domainbuf, domain, buflen);
    else
        domainbuf[0] = '\0';

    *ridp = FUID_RID(fuid);
}
zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
    switch (type) {
    case ZFS_PROP_USERUSED:
        return (DMU_USERUSED_OBJECT);
    case ZFS_PROP_GROUPUSED:
        return (DMU_GROUPUSED_OBJECT);
    case ZFS_PROP_USERQUOTA:
        return (zsb->z_userquota_obj);
    case ZFS_PROP_GROUPQUOTA:
        return (zsb->z_groupquota_obj);
    }
}
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
    int error;
    zap_cursor_t zc;
    zap_attribute_t za;
    zfs_useracct_t *buf = vbuf;
    uint64_t obj;

    if (!dmu_objset_userspace_present(zsb->z_os))
        return (ENOTSUP);

    obj = zfs_userquota_prop_to_obj(zsb, type);
    if (obj == 0) {
        *bufsizep = 0;
        return (0);
    }

    for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
        (error = zap_cursor_retrieve(&zc, &za)) == 0;
        zap_cursor_advance(&zc)) {
        if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
            *bufsizep)
            break;

        fuidstr_to_sid(zsb, za.za_name,
            buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

        buf->zu_space = za.za_first_integer;
        buf++;
    }
    if (error == ENOENT)
        error = 0;

    ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
    *bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
    *cookiep = zap_cursor_serialize(&zc);
    zap_cursor_fini(&zc);
    return (error);
}
EXPORT_SYMBOL(zfs_userspace_many);
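/*
 * Note: zfs_userspace_many() copies out as many zfs_useracct_t entries as
 * fit in '*bufsizep' bytes of 'vbuf', then serializes its ZAP cursor back
 * into '*cookiep'.  Consumers (the {user,group}space ioctl path, for
 * example) typically call it in a loop, feeding the returned cookie back
 * in, until no more entries are returned.
 */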
/*
 * buf must be big enough (eg, 32 bytes)
 */
id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
    uint64_t fuid;
    int domainid = 0;

    if (domain && domain[0]) {
        domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
    }

    fuid = FUID_ENCODE(domainid, rid);
    (void) sprintf(buf, "%llx", (longlong_t)fuid);
    return (0);
}
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
    char buf[32];
    uint64_t obj;
    int err;

    if (!dmu_objset_userspace_present(zsb->z_os))
        return (ENOTSUP);

    obj = zfs_userquota_prop_to_obj(zsb, type);

    err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
    if (err)
        return (err);

    err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
    return (err);
}
EXPORT_SYMBOL(zfs_userspace_one);
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
    char buf[32];
    int err;
    dmu_tx_t *tx;
    uint64_t *objp;
    boolean_t fuid_dirtied;

    if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
        return (EINVAL);

    if (zsb->z_version < ZPL_VERSION_USERSPACE)
        return (ENOTSUP);

    objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
        &zsb->z_groupquota_obj;

    err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
    if (err)
        return (err);
    fuid_dirtied = zsb->z_fuid_dirty;

    tx = dmu_tx_create(zsb->z_os);
    dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
    if (*objp == 0) {
        dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
            zfs_userquota_prop_prefixes[type]);
    }
    if (fuid_dirtied)
        zfs_fuid_txhold(zsb, tx);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err) {
        dmu_tx_abort(tx);
        return (err);
    }

    mutex_enter(&zsb->z_lock);
    if (*objp == 0) {
        *objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
            DMU_OT_NONE, 0, tx);
        VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
            zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
    }
    mutex_exit(&zsb->z_lock);

    if (quota == 0) {
        err = zap_remove(zsb->z_os, *objp, buf, tx);
        if (err == ENOENT)
            err = 0;
    } else {
        err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
    }

    if (fuid_dirtied)
        zfs_fuid_sync(zsb, tx);
    dmu_tx_commit(tx);
    return (err);
}
EXPORT_SYMBOL(zfs_set_userquota);
zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid)
{
    char buf[32];
    uint64_t used, quota, usedobj, quotaobj;
    int err;

    usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
    quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

    if (quotaobj == 0 || zsb->z_replay)
        return (B_FALSE);

    (void) sprintf(buf, "%llx", (longlong_t)fuid);
    err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota);
    if (err != 0)
        return (B_FALSE);

    err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used);
    if (err != 0)
        return (B_FALSE);

    return (used >= quota);
}
EXPORT_SYMBOL(zfs_fuid_overquota);
zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup)
{
    uint64_t fuid;
    uint64_t quotaobj;

    quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

    fuid = isgroup ? zp->z_gid : zp->z_uid;

    if (quotaobj == 0 || zsb->z_replay)
        return (B_FALSE);

    return (zfs_fuid_overquota(zsb, isgroup, fuid));
}
EXPORT_SYMBOL(zfs_owner_overquota);
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
    objset_t *os;
    zfs_sb_t *zsb;
    uint64_t zval;
    uint64_t sa_obj;
    int i, error;

    zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);

    /*
     * We claim to always be readonly so we can open snapshots;
     * other ZPL code will prevent us from writing to snapshots.
     */
    error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
    if (error) {
        kmem_free(zsb, sizeof (zfs_sb_t));
        return (error);
    }

    /*
     * Initialize the zfs-specific filesystem structure.
     * Should probably make this a kmem cache, shuffle fields,
     * and just bzero up to z_hold_mtx[].
     */
    zsb->z_os = os;
    zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
    zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

    error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
    if (error) {
        goto out;
    } else if (zsb->z_version >
        zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
        (void) printk("Can't mount a version %lld file system "
            "on a version %lld pool\n. Pool must be upgraded to mount "
            "this file system.", (u_longlong_t)zsb->z_version,
            (u_longlong_t)spa_version(dmu_objset_spa(os)));
        error = ENOTSUP;
        goto out;
    }
    if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
        goto out;
    zsb->z_norm = (int)zval;

    if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
        goto out;
    zsb->z_utf8 = (zval != 0);

    if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
        goto out;
    zsb->z_case = (uint_t)zval;

    /*
     * Fold case on file systems that are always or sometimes case
     * insensitive.
     */
    if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
        zsb->z_case == ZFS_CASE_MIXED)
        zsb->z_norm |= U8_TEXTPREP_TOUPPER;

    zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
    zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

    if (zsb->z_use_sa) {
        /* should either have both of these objects or none */
        error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
            &sa_obj);
        if (error)
            goto out;

        error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
        if ((error == 0) && (zval == ZFS_XATTR_SA))
            zsb->z_xattr_sa = B_TRUE;
    } else {
        /*
         * Pre SA versions file systems should never touch
         * either the attribute registration or layout objects.
         */
        sa_obj = 0;
    }

    error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
        &zsb->z_attr_table);
    if (error)
        goto out;

    if (zsb->z_version >= ZPL_VERSION_SA)
        sa_register_update_callback(os, zfs_sa_upgrade);

    error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
        &zsb->z_root);
    if (error)
        goto out;
    ASSERT(zsb->z_root != 0);

    error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
        &zsb->z_unlinkedobj);
    if (error)
        goto out;

    error = zap_lookup(os, MASTER_NODE_OBJ,
        zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
        8, 1, &zsb->z_userquota_obj);
    if (error && error != ENOENT)
        goto out;

    error = zap_lookup(os, MASTER_NODE_OBJ,
        zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
        8, 1, &zsb->z_groupquota_obj);
    if (error && error != ENOENT)
        goto out;

    error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
        &zsb->z_fuid_obj);
    if (error && error != ENOENT)
        goto out;

    error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
        &zsb->z_shares_dir);
    if (error && error != ENOENT)
        goto out;

    mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
    list_create(&zsb->z_all_znodes, sizeof (znode_t),
        offsetof(znode_t, z_link_node));
    rrw_init(&zsb->z_teardown_lock);
    rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
    rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
    for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
        mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

    avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
        sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
    mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

    *zsbp = zsb;
    return (0);

out:
    dmu_objset_disown(os, zsb);
    *zsbp = NULL;
    kmem_free(zsb, sizeof (zfs_sb_t));
    return (error);
}
EXPORT_SYMBOL(zfs_sb_create);
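/*
 * Finish setting up a zfs_sb_t returned by zfs_sb_create(): register the
 * dataset property callbacks, point the objset's user pointer at the
 * zfs_sb_t, open the ZIL and, when 'mounting' is set, drain the unlinked
 * set and replay (or destroy) the intent log before the readonly state is
 * restored.
 */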
zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
    int error;

    error = zfs_register_callbacks(zsb);
    if (error)
        return (error);

    /*
     * Set the objset user_ptr to track its zsb.
     */
    mutex_enter(&zsb->z_os->os_user_ptr_lock);
    dmu_objset_set_user(zsb->z_os, zsb);
    mutex_exit(&zsb->z_os->os_user_ptr_lock);

    zsb->z_log = zil_open(zsb->z_os, zfs_get_data);

    /*
     * If we are not mounting (ie: online recv), then we don't
     * have to worry about replaying the log as we blocked all
     * operations out since we closed the ZIL.
     */
    if (mounting) {
        boolean_t readonly;

        /*
         * During replay we remove the read only flag to
         * allow replays to succeed.
         */
        readonly = zfs_is_readonly(zsb);
        if (readonly != 0)
            readonly_changed_cb(zsb, B_FALSE);
        else
            zfs_unlinked_drain(zsb);

        /*
         * Parse and replay the intent log.
         *
         * Because of ziltest, this must be done after
         * zfs_unlinked_drain().  (Further note: ziltest
         * doesn't use readonly mounts, where
         * zfs_unlinked_drain() isn't called.)  This is because
         * ziltest causes spa_sync() to think it's committed,
         * but actually it is not, so the intent log contains
         * many txg's worth of changes.
         *
         * In particular, if object N is in the unlinked set in
         * the last txg to actually sync, then it could be
         * actually freed in a later txg and then reallocated
         * in a yet later txg.  This would write a "create
         * object N" record to the intent log.  Normally, this
         * would be fine because the spa_sync() would have
         * written out the fact that object N is free, before
         * we could write the "create object N" intent log
         * record.
         *
         * But when we are in ziltest mode, we advance the "open
         * txg" without actually spa_sync()-ing the changes to
         * disk.  So we would see that object N is still
         * allocated and in the unlinked set, and there is an
         * intent log record saying to allocate it.
         */
        if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
            if (zil_replay_disable) {
                zil_destroy(zsb->z_log, B_FALSE);
            } else {
                zsb->z_replay = B_TRUE;
                zil_replay(zsb->z_os, zsb,
                    zfs_replay_vector);
                zsb->z_replay = B_FALSE;
            }
        }

        /* restore readonly bit */
        if (readonly != 0)
            readonly_changed_cb(zsb, B_TRUE);
    }

    return (0);
}
EXPORT_SYMBOL(zfs_sb_setup);
zfs_sb_free(zfs_sb_t *zsb)
{
    int i;

    zfs_fuid_destroy(zsb);

    mutex_destroy(&zsb->z_znodes_lock);
    mutex_destroy(&zsb->z_lock);
    list_destroy(&zsb->z_all_znodes);
    rrw_destroy(&zsb->z_teardown_lock);
    rw_destroy(&zsb->z_teardown_inactive_lock);
    rw_destroy(&zsb->z_fuid_lock);
    for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
        mutex_destroy(&zsb->z_hold_mtx[i]);
    mutex_destroy(&zsb->z_ctldir_lock);
    avl_destroy(&zsb->z_ctldir_snaps);
    kmem_free(zsb, sizeof (zfs_sb_t));
}
EXPORT_SYMBOL(zfs_sb_free);
zfs_set_fuid_feature(zfs_sb_t *zsb)
{
    zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
    zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
}
zfs_unregister_callbacks(zfs_sb_t *zsb)
{
    objset_t *os = zsb->z_os;
    struct dsl_dataset *ds;

    /*
     * Unregister properties.
     */
    if (!dmu_objset_is_snapshot(os)) {
        ds = dmu_objset_ds(os);
        VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
            zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "aclinherit",
            acl_inherit_changed_cb, zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "vscan",
            vscan_changed_cb, zsb) == 0);

        VERIFY(dsl_prop_unregister(ds, "nbmand",
            nbmand_changed_cb, zsb) == 0);
    }
}
EXPORT_SYMBOL(zfs_unregister_callbacks);
#ifdef HAVE_MLSLABEL
/*
 * zfs_check_global_label:
 *	Check that the hex label string is appropriate for the dataset
 *	being mounted into the global_zone proper.
 *
 *	Return an error if the hex label string is not default or
 *	admin_low/admin_high.  For admin_low labels, the corresponding
 *	dataset must be readonly.
 */
zfs_check_global_label(const char *dsname, const char *hexsl)
{
    if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
        return (0);
    if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
        return (0);
    if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
        /* must be readonly */
        uint64_t rdonly;

        if (dsl_prop_get_integer(dsname,
            zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
            return (EACCES);
        return (rdonly ? 0 : EACCES);
    }
    return (EACCES);
}
EXPORT_SYMBOL(zfs_check_global_label);
#endif /* HAVE_MLSLABEL */
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
    zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
    uint64_t refdbytes, availbytes, usedobjs, availobjs;
    uint64_t fsid;
    uint32_t bshift;

    dmu_objset_space(zsb->z_os,
        &refdbytes, &availbytes, &usedobjs, &availobjs);

    fsid = dmu_objset_fsid_guid(zsb->z_os);

    /*
     * The underlying storage pool actually uses multiple block
     * sizes.  Under Solaris frsize (fragment size) is reported as
     * the smallest block size we support, and bsize (block size)
     * as the filesystem's maximum block size.  Unfortunately,
     * under Linux the fragment size and block size are often used
     * interchangeably.  Thus we are forced to report both of them
     * as the filesystem's maximum block size.
     */
    statp->f_frsize = zsb->z_max_blksz;
    statp->f_bsize = zsb->z_max_blksz;
    bshift = fls(statp->f_bsize) - 1;

    /*
     * The following report "total" blocks of various kinds in
     * the file system, but reported in terms of f_bsize - the
     * "preferred" size.
     */
    statp->f_blocks = (refdbytes + availbytes) >> bshift;
    statp->f_bfree = availbytes >> bshift;
    statp->f_bavail = statp->f_bfree; /* no root reservation */

    /*
     * statvfs() should really be called statufs(), because it assumes
     * static metadata.  ZFS doesn't preallocate files, so the best
     * we can do is report the max that could possibly fit in f_files,
     * and that minus the number actually used in f_ffree.
     * For f_ffree, report the smaller of the number of objects available
     * and the number of blocks (each object will take at least a block).
     */
    statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
    statp->f_files = statp->f_ffree + usedobjs;
    statp->f_fsid.val[0] = (uint32_t)fsid;
    statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
    statp->f_type = ZFS_SUPER_MAGIC;
    statp->f_namelen = ZFS_MAXNAMELEN;

    /*
     * We have all of 40 characters to stuff a string here.
     * Is there anything useful we could/should provide?
     */
    bzero(statp->f_spare, sizeof (statp->f_spare));

    return (0);
}
EXPORT_SYMBOL(zfs_statvfs);
zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
    znode_t *rootzp;
    int error;

    error = zfs_zget(zsb, zsb->z_root, &rootzp);
    if (error == 0)
        *ipp = ZTOI(rootzp);

    return (error);
}
EXPORT_SYMBOL(zfs_root);
#ifdef HAVE_SHRINK
zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
    zfs_sb_t *zsb = sb->s_fs_info;
    struct shrinker *shrinker = &sb->s_shrink;
    struct shrink_control sc = {
        .nr_to_scan = nr_to_scan,
        .gfp_mask = GFP_KERNEL,
    };

    *objects = (*shrinker->shrink)(shrinker, &sc);

    return (0);
}
EXPORT_SYMBOL(zfs_sb_prune);
#endif /* HAVE_SHRINK */
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
    znode_t *zp;

    rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

    if (!unmounting) {
        /*
         * We purge the parent filesystem's super block as the
         * parent filesystem and all of its snapshots have their
         * inode's super block set to the parent's filesystem's
         * super block.  Note, 'z_parent' is self referential
         * for non-snapshots.
         */
        shrink_dcache_sb(zsb->z_parent->z_sb);
    }

    /*
     * If someone has not already unmounted this file system,
     * drain the iput_taskq to ensure all active references to the
     * zfs_sb_t have been handled only then can it be safely destroyed.
     */
    if (zsb->z_os)
        taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));

    /*
     * Close the zil.  NB: Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zsb->z_log) {
        zil_close(zsb->z_log);
        zsb->z_log = NULL;
    }

    rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

    /*
     * If we are not unmounting (ie: online recv) and someone already
     * unmounted this file system while we were doing the switcheroo,
     * or a reopen of z_os failed then just bail out now.
     */
    if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
        rw_exit(&zsb->z_teardown_inactive_lock);
        rrw_exit(&zsb->z_teardown_lock, FTAG);
        return (EBUSY);
    }

    /*
     * At this point there are no VFS ops active, and any new VFS ops
     * will fail with EIO since we have z_teardown_lock for writer (only
     * relevant for forced unmount).
     *
     * Release all holds on dbufs.
     */
    mutex_enter(&zsb->z_znodes_lock);
    for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
        zp = list_next(&zsb->z_all_znodes, zp)) {
        if (zp->z_sa_hdl) {
            ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
            zfs_znode_dmu_fini(zp);
        }
    }
    mutex_exit(&zsb->z_znodes_lock);

    /*
     * If we are unmounting, set the unmounted flag and let new VFS ops
     * unblock.  zfs_inactive will have the unmounted behavior, and all
     * other VFS ops will fail with EIO.
     */
    if (unmounting) {
        zsb->z_unmounted = B_TRUE;
        rrw_exit(&zsb->z_teardown_lock, FTAG);
        rw_exit(&zsb->z_teardown_inactive_lock);
    }

    /*
     * z_os will be NULL if there was an error in attempting to reopen
     * zsb, so just return as the properties had already been
     * unregistered and cached data had been evicted before.
     */
    if (zsb->z_os == NULL)
        return (0);

    /*
     * Unregister properties.
     */
    zfs_unregister_callbacks(zsb);

    if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
        !zfs_is_readonly(zsb))
        txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
    (void) dmu_objset_evict_dbufs(zsb->z_os);

    return (0);
}
EXPORT_SYMBOL(zfs_sb_teardown);
#if defined(HAVE_BDI) && !defined(HAVE_BDI_SETUP_AND_REGISTER)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif /* HAVE_BDI && !HAVE_BDI_SETUP_AND_REGISTER */
zfs_domount(struct super_block *sb, void *data, int silent)
{
    zpl_mount_data_t *zmd = data;
    const char *osname = zmd->z_osname;
    zfs_sb_t *zsb;
    struct inode *root_inode;
    uint64_t recordsize;
    int error;

    error = zfs_sb_create(osname, &zsb);
    if (error)
        return (error);

    if ((error = dsl_prop_get_integer(osname, "recordsize",
        &recordsize, NULL)))
        goto out;

    sb->s_fs_info = zsb;
    sb->s_magic = ZFS_SUPER_MAGIC;
    sb->s_maxbytes = MAX_LFS_FILESIZE;
    sb->s_time_gran = 1;
    sb->s_blocksize = recordsize;
    sb->s_blocksize_bits = ilog2(recordsize);

#ifdef HAVE_BDI
    /*
     * 2.6.32 API change,
     * Added backing_device_info (BDI) per super block interfaces.  A BDI
     * must be configured when using a non-device backed filesystem for
     * proper writeback.  This is not required for older pdflush kernels.
     *
     * NOTE: Linux read-ahead is disabled in favor of zfs read-ahead.
     */
    zsb->z_bdi.ra_pages = 0;
    sb->s_bdi = &zsb->z_bdi;

    error = -bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
    if (error)
        goto out;
#endif /* HAVE_BDI */

    /* Set callback operations for the file system. */
    sb->s_op = &zpl_super_operations;
    sb->s_xattr = zpl_xattr_handlers;
    sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
    sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

    /* Set features for file system. */
    zfs_set_fuid_feature(zsb);

    if (dmu_objset_is_snapshot(zsb->z_os)) {
        uint64_t pval;

        atime_changed_cb(zsb, B_FALSE);
        readonly_changed_cb(zsb, B_TRUE);
        if ((error = dsl_prop_get_integer(osname, "xattr", &pval, NULL)))
            goto out;
        xattr_changed_cb(zsb, pval);
        zsb->z_issnap = B_TRUE;
        zsb->z_os->os_sync = ZFS_SYNC_DISABLED;

        mutex_enter(&zsb->z_os->os_user_ptr_lock);
        dmu_objset_set_user(zsb->z_os, zsb);
        mutex_exit(&zsb->z_os->os_user_ptr_lock);
    } else {
        error = zfs_sb_setup(zsb, B_TRUE);
    }

    /* Allocate a root inode for the filesystem. */
    error = zfs_root(zsb, &root_inode);
    if (error) {
        (void) zfs_umount(sb);
        goto out;
    }

    /* Allocate a root dentry for the filesystem */
    sb->s_root = d_make_root(root_inode);
    if (sb->s_root == NULL) {
        (void) zfs_umount(sb);
        error = ENOMEM;
        goto out;
    }
out:
    if (error)
        dmu_objset_disown(zsb->z_os, zsb);

    return (error);
}
EXPORT_SYMBOL(zfs_domount);
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
zfs_preumount(struct super_block *sb)
{
    zfs_sb_t *zsb = sb->s_fs_info;

    if (zsb != NULL && zsb->z_ctldir != NULL)
        zfsctl_destroy(zsb);
}
EXPORT_SYMBOL(zfs_preumount);
/*
 * Called once all other unmount-related tear down has occurred.
 * It is our responsibility to release any remaining infrastructure.
 */
zfs_umount(struct super_block *sb)
{
    zfs_sb_t *zsb = sb->s_fs_info;
    objset_t *os;

    VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
    os = zsb->z_os;

#ifdef HAVE_BDI
    bdi_destroy(sb->s_bdi);
#endif /* HAVE_BDI */

    /*
     * z_os will be NULL if there was an error in
     * attempting to reopen zsb.
     */
    if (os != NULL) {
        /*
         * Unset the objset user_ptr.
         */
        mutex_enter(&os->os_user_ptr_lock);
        dmu_objset_set_user(os, NULL);
        mutex_exit(&os->os_user_ptr_lock);

        /*
         * Finally release the objset
         */
        dmu_objset_disown(os, zsb);
    }

    zfs_sb_free(zsb);
    return (0);
}
EXPORT_SYMBOL(zfs_umount);
zfs_remount(struct super_block *sb, int *flags, char *data)
{
    /*
     * All namespace flags (MNT_*) and super block flags (MS_*) will
     * be handled by the Linux VFS.  Only handle custom options here.
     */
    return (0);
}
EXPORT_SYMBOL(zfs_remount);
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
    zfs_sb_t *zsb = sb->s_fs_info;
    znode_t *zp;
    uint64_t object = 0;
    uint64_t fid_gen = 0;
    uint64_t gen_mask;
    uint64_t zp_gen;
    int i, err;

    if (fidp->fid_len == LONG_FID_LEN) {
        zfid_long_t *zlfid = (zfid_long_t *)fidp;
        uint64_t objsetid = 0;
        uint64_t setgen = 0;

        for (i = 0; i < sizeof (zlfid->zf_setid); i++)
            objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

        for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
            setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

        err = zfsctl_lookup_objset(sb, objsetid, &zsb);
        if (err)
            return (EINVAL);
    }

    if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
        zfid_short_t *zfid = (zfid_short_t *)fidp;

        for (i = 0; i < sizeof (zfid->zf_object); i++)
            object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

        for (i = 0; i < sizeof (zfid->zf_gen); i++)
            fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
    } else {
        return (EINVAL);
    }

    /* A zero fid_gen means we are in the .zfs control directories */
    if (fid_gen == 0 &&
        (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
        *ipp = zsb->z_ctldir;
        ASSERT(*ipp != NULL);
        if (object == ZFSCTL_INO_SNAPDIR) {
            VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
                0, kcred, NULL, NULL) == 0);
        }
        return (0);
    }

    gen_mask = -1ULL >> (64 - 8 * i);

    dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
    if ((err = zfs_zget(zsb, object, &zp))) {
        return (err);
    }

    (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
        sizeof (uint64_t));
    zp_gen = zp_gen & gen_mask;

    if (zp->z_unlinked || zp_gen != fid_gen) {
        dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
        iput(ZTOI(zp));
        return (EINVAL);
    }

    *ipp = ZTOI(zp);
    if (*ipp)
        zfs_inode_update(ITOZ(*ipp));

    return (0);
}
EXPORT_SYMBOL(zfs_vget);
/*
 * Block out VFS ops and close zfs_sb_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.
 */
zfs_suspend_fs(zfs_sb_t *zsb)
{
    int error;

    if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
        return (error);

    dmu_objset_disown(zsb->z_os, zsb);

    return (0);
}
EXPORT_SYMBOL(zfs_suspend_fs);
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
    int err, err2;

    ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
    ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

    err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zsb, &zsb->z_os);
    if (err) {
        zsb->z_os = NULL;
    } else {
        znode_t *zp;
        uint64_t sa_obj = 0;

        err2 = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
            ZFS_SA_ATTRS, 8, 1, &sa_obj);

        if ((err || err2) && zsb->z_version >= ZPL_VERSION_SA)
            goto bail;

        if ((err = sa_setup(zsb->z_os, sa_obj,
            zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
            goto bail;

        VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
        zsb->z_rollback_time = jiffies;

        /*
         * Attempt to re-establish all the active inodes with their
         * dbufs.  If a zfs_rezget() fails, then we unhash the inode
         * and mark it stale.  This prevents a collision if a new
         * inode/object is created which must use the same inode
         * number.  The stale inode will be released when the
         * VFS prunes the dentry holding the remaining references
         * on the stale inode.
         */
        mutex_enter(&zsb->z_znodes_lock);
        for (zp = list_head(&zsb->z_all_znodes); zp;
            zp = list_next(&zsb->z_all_znodes, zp)) {
            err2 = zfs_rezget(zp);
            if (err2) {
                remove_inode_hash(ZTOI(zp));
                zp->z_is_stale = B_TRUE;
            }
        }
        mutex_exit(&zsb->z_znodes_lock);
    }

bail:
    /* release the VFS ops */
    rw_exit(&zsb->z_teardown_inactive_lock);
    rrw_exit(&zsb->z_teardown_lock, FTAG);

    if (err) {
        /*
         * Since we couldn't reopen zfs_sb_t, or setup the sa framework,
         * force unmount this file system.
         */
        (void) zfs_umount(zsb->z_sb);
    }
    return (err);
}
EXPORT_SYMBOL(zfs_resume_fs);
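/*
 * Upgrade the on-disk ZPL version of the file system to 'newvers'.
 * Downgrades and versions newer than the pool supports are rejected; when
 * crossing the SA boundary the SA master node is created and registered
 * within the same transaction.
 */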
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
    int error;
    objset_t *os = zsb->z_os;
    dmu_tx_t *tx;

    if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
        return (EINVAL);

    if (newvers < zsb->z_version)
        return (EINVAL);

    if (zfs_spa_version_map(newvers) >
        spa_version(dmu_objset_spa(zsb->z_os)))
        return (ENOTSUP);

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
    if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
        dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
            ZFS_SA_ATTRS);
        dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
    }
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }

    error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
        8, 1, &newvers, tx);
    if (error) {
        dmu_tx_commit(tx);
        return (error);
    }

    if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
        uint64_t sa_obj;

        ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
            SPA_VERSION_SA);
        sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
            DMU_OT_NONE, 0, tx);

        error = zap_add(os, MASTER_NODE_OBJ,
            ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);

        VERIFY(0 == sa_set_sa_object(os, sa_obj));
        sa_register_update_callback(os, zfs_sa_upgrade);
    }

    spa_history_log_internal(LOG_DS_UPGRADE,
        dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu",
        zsb->z_version, newvers, dmu_objset_id(os));

    dmu_tx_commit(tx);

    zsb->z_version = newvers;

    if (zsb->z_version >= ZPL_VERSION_FUID)
        zfs_set_fuid_feature(zsb);

    return (0);
}
EXPORT_SYMBOL(zfs_set_version);
/*
 * Read a property stored within the master node.
 */
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
    const char *pname;
    int error;

    /*
     * Look up the file system's value for the property.  For the
     * version property, we look up a slightly different string.
     */
    if (prop == ZFS_PROP_VERSION)
        pname = ZPL_VERSION_STR;
    else
        pname = zfs_prop_to_name(prop);

    error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);

    if (error == ENOENT) {
        /* No value set, use the default value */
        switch (prop) {
        case ZFS_PROP_VERSION:
            *value = ZPL_VERSION;
            break;
        case ZFS_PROP_NORMALIZE:
        case ZFS_PROP_UTF8ONLY:
            *value = 0;
            break;
        case ZFS_PROP_CASE:
            *value = ZFS_CASE_SENSITIVE;
            break;
        default:
            return (error);
        }
        error = 0;
    }
    return (error);
}
EXPORT_SYMBOL(zfs_get_zplprop);
void
zfs_init(void)
{
    dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
    register_filesystem(&zpl_fs_type);
    (void) arc_add_prune_callback(zpl_prune_sbs, NULL);
}

void
zfs_fini(void)
{
    unregister_filesystem(&zpl_fs_type);
}