/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/sa_impl.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/mkdev.h>
#include <sys/modctl.h>
#include <sys/refstr.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/bootconf.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include "zfs_comutil.h"
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	 */
	if (!wait)
		return (0);

	if (zsb != NULL) {
		/*
		 * Sync a specific filesystem.
		 */
		dsl_pool_t *dp;

		ZFS_ENTER(zsb);
		dp = dmu_objset_pool(zsb->z_os);

		/*
		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		 */
		if (spa_suspended(dp->dp_spa)) {
			ZFS_EXIT(zsb);
			return (0);
		}

		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, 0);

		ZFS_EXIT(zsb);
	} else {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		(void) spa_sync_allpools();
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sync);
boolean_t
zfs_is_readonly(zfs_sb_t *zsb)
{
	return (!!(zsb->z_sb->s_flags & MS_RDONLY));
}
EXPORT_SYMBOL(zfs_is_readonly);
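/*
 * The dsl_prop_register() callbacks below keep the in-memory copy of
 * each dataset property cached in the zfs_sb_t synchronized with its
 * on-disk value, so a "zfs set" takes effect on a mounted filesystem
 * immediately.
 */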
static void
atime_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_atime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval == ZFS_XATTR_OFF) {
		zsb->z_flags &= ~ZSB_XATTR;
	} else {
		zsb->z_flags |= ZSB_XATTR;

		if (newval == ZFS_XATTR_SA)
			zsb->z_xattr_sa = B_TRUE;
		else
			zsb->z_xattr_sa = B_FALSE;
	}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval < SPA_MINBLOCKSIZE ||
	    newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
		newval = SPA_MAXBLOCKSIZE;

	zsb->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_RDONLY;
	else
		sb->s_flags &= ~MS_RDONLY;
}
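/*
 * The devices, setuid, and exec property callbacks below are empty
 * no-ops in this port: on Linux the corresponding restrictions are
 * applied through standard mount flags at the VFS layer, but a
 * callback still has to be registered for each property.
 */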
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_MANDLOCK;
	else
		sb->s_flags &= ~MS_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_show_ctldir = newval;
}
static void
vscan_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_vscan = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_acl_inherit = newval;
}
int
zfs_register_callbacks(zfs_sb_t *zsb)
{
	struct dsl_dataset *ds = NULL;
	objset_t *os = zsb->z_os;
	int error = 0;

	if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os)))
		readonly_changed_cb(zsb, B_TRUE);

	/*
	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go
	 * overboard...
	 */
	ds = dmu_objset_ds(os);
	error = dsl_prop_register(ds,
	    "atime", atime_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "xattr", xattr_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "recordsize", blksz_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "readonly", readonly_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "devices", devices_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "setuid", setuid_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "exec", exec_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "snapdir", snapdir_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "aclinherit", acl_inherit_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "vscan", vscan_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "nbmand", nbmand_changed_cb, zsb);
	if (error)
		goto unregister;

	return (0);

unregister:
	/*
	 * We may attempt to unregister some callbacks that are not
	 * registered, but this is OK; it will simply return ENOMSG,
	 * which we will ignore.
	 */
	(void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
	    zsb);
	(void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "nbmand", nbmand_changed_cb, zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_register_callbacks);
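/*
 * Called by the DMU whenever an object's bonus buffer is updated, so
 * that per-user and per-group space accounting can be maintained.
 * Extracts the owning uid/gid from either the legacy znode_phys_t or
 * a system-attribute (SA) bonus buffer, byte-swapping if the SA header
 * indicates the object was written on an opposite-endian system.
 */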
int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (ENOENT);

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids
	 */
	if (data == NULL)
		return (EEXIST);

	if (bonustype == DMU_OT_ZNODE) {
		znode_phys_t *znp = data;
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
	} else {
		int hdrsize;
		sa_hdr_phys_t *sap = data;
		sa_hdr_phys_t sa = *sap;
		boolean_t swap = B_FALSE;

		ASSERT(bonustype == DMU_OT_SA);

		if (sa.sa_magic == 0) {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
			 * in yet.
			 */
			*userp = 0;
			*groupp = 0;
			return (0);
		}
		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
			sa.sa_magic = SA_MAGIC;
			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
			swap = B_TRUE;
		} else {
			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
		}

		hdrsize = sa_hdrsize(&sa);
		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_UID_OFFSET));
		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_GID_OFFSET));
		if (swap) {
			*userp = BSWAP_64(*userp);
			*groupp = BSWAP_64(*groupp);
		}
	}
	return (0);
}
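/*
 * Convert the printed FUID key used in the userused/userquota ZAP
 * objects back into a domain string and rid for reporting to callers.
 */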
static void
fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
	uint64_t fuid;
	const char *domain;

	fuid = strtonum(fuidstr, NULL);

	domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid));
	if (domain)
		(void) strlcpy(domainbuf, domain, buflen);
	else
		domainbuf[0] = '\0';
	*ridp = FUID_RID(fuid);
}
static uint64_t
zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
	switch (type) {
	case ZFS_PROP_USERUSED:
		return (DMU_USERUSED_OBJECT);
	case ZFS_PROP_GROUPUSED:
		return (DMU_GROUPUSED_OBJECT);
	case ZFS_PROP_USERQUOTA:
		return (zsb->z_userquota_obj);
	case ZFS_PROP_GROUPQUOTA:
		return (zsb->z_groupquota_obj);
	default:
		return (ENOTSUP);
	}
	return (0);
}
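/*
 * Iterate over the requested space-accounting ZAP object, filling
 * 'vbuf' with zfs_useracct_t entries.  '*cookiep' is a serialized ZAP
 * cursor: callers pass 0 on the first call and pass the returned value
 * back in to resume iteration.  '*bufsizep' is updated on return to
 * the number of bytes actually written.
 */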
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (ENOTSUP);

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0) {
		*bufsizep = 0;
		return (0);
	}

	for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		fuidstr_to_sid(zsb, za.za_name,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
EXPORT_SYMBOL(zfs_userspace_many);
/*
 * buf must be big enough (eg, 32 bytes)
 */
static int
id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
		if (domainid == -1)
			return (ENOENT);
	}
	fuid = FUID_ENCODE(domainid, rid);
	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	return (0);
}
int
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[32];
	int err;
	uint64_t obj;

	*valp = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (ENOTSUP);

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0)
		return (0);

	err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
EXPORT_SYMBOL(zfs_userspace_one);
int
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (EINVAL);

	if (zsb->z_version < ZPL_VERSION_USERSPACE)
		return (ENOTSUP);

	objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
	    &zsb->z_groupquota_obj;

	err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zsb->z_fuid_dirty;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zsb->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zsb->z_lock);

	if (quota == 0) {
		err = zap_remove(zsb->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);
	dmu_tx_commit(tx);
	return (err);
}
EXPORT_SYMBOL(zfs_set_userquota);
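/*
 * Check whether the id's current space usage meets or exceeds its
 * quota.  B_FALSE is returned when no quota object exists for the id
 * or while the ZIL is being replayed.
 */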
boolean_t
zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid)
{
	char buf[32];
	uint64_t used, quota, usedobj, quotaobj;
	int err;

	usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
EXPORT_SYMBOL(zfs_fuid_overquota);
boolean_t
zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup)
{
	uint64_t fuid;
	uint64_t quotaobj;

	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	fuid = isgroup ? zp->z_gid : zp->z_uid;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	return (zfs_fuid_overquota(zsb, isgroup, fuid));
}
EXPORT_SYMBOL(zfs_owner_overquota);
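/*
 * Allocate a new zfs_sb_t, take a hold on the named objset, and cache
 * the ZPL properties (version, normalization, case sensitivity, ...)
 * and master-node objects needed to operate the filesystem.
 */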
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool. Pool must be upgraded to mount "
		    "this file system.\n", (u_longlong_t)zsb->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		error = ENOTSUP;
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
EXPORT_SYMBOL(zfs_sb_create);
int
zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
	int error;

	error = zfs_register_callbacks(zsb);
	if (error)
		return (error);

	/*
	 * Set the objset user_ptr to track its zsb.
	 */
	mutex_enter(&zsb->z_os->os_user_ptr_lock);
	dmu_objset_set_user(zsb->z_os, zsb);
	mutex_exit(&zsb->z_os->os_user_ptr_lock);

	zsb->z_log = zil_open(zsb->z_os, zfs_get_data);

	/*
	 * If we are not mounting (ie: online recv), then we don't
	 * have to worry about replaying the log as we blocked all
	 * operations out since we closed the ZIL.
	 */
	if (mounting) {
		boolean_t readonly;

		/*
		 * During replay we remove the read only flag to
		 * allow replays to succeed.
		 */
		readonly = zfs_is_readonly(zsb);
		if (readonly != 0)
			readonly_changed_cb(zsb, B_FALSE);
		else
			zfs_unlinked_drain(zsb);

		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest
		 * doesn't use readonly mounts, where
		 * zfs_unlinked_drain() isn't called.)  This is because
		 * ziltest causes spa_sync() to think it's committed,
		 * but actually it is not, so the intent log contains
		 * many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated
		 * in a yet later txg.  This would write a "create
		 * object N" record to the intent log.  Normally, this
		 * would be fine because the spa_sync() would have
		 * written out the fact that object N is free, before
		 * we could write the "create object N" intent log
		 * record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
			if (zil_replay_disable) {
				zil_destroy(zsb->z_log, B_FALSE);
			} else {
				zsb->z_replay = B_TRUE;
				zil_replay(zsb->z_os, zsb,
				    zfs_replay_vector);
				zsb->z_replay = B_FALSE;
			}
		}

		/* restore readonly bit */
		if (readonly != 0)
			readonly_changed_cb(zsb, B_TRUE);
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sb_setup);
void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i;

	zfs_fuid_destroy(zsb);

	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrw_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);
	mutex_destroy(&zsb->z_ctldir_lock);
	avl_destroy(&zsb->z_ctldir_snaps);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
EXPORT_SYMBOL(zfs_sb_free);
void
zfs_set_fuid_feature(zfs_sb_t *zsb)
{
	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
}
void
zfs_unregister_callbacks(zfs_sb_t *zsb)
{
	objset_t *os = zsb->z_os;
	struct dsl_dataset *ds;

	/*
	 * Unregister properties.
	 */
	if (!dmu_objset_is_snapshot(os)) {
		ds = dmu_objset_ds(os);
		VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "aclinherit",
		    acl_inherit_changed_cb, zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "vscan",
		    vscan_changed_cb, zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "nbmand",
		    nbmand_changed_cb, zsb) == 0);
	}
}
EXPORT_SYMBOL(zfs_unregister_callbacks);
#ifdef HAVE_MLSLABEL
/*
 * zfs_check_global_label:
 *	Check that the hex label string is appropriate for the dataset
 *	being mounted into the global_zone proper.
 *
 *	Return an error if the hex label string is not default or
 *	admin_low/admin_high.  For admin_low labels, the corresponding
 *	dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (EACCES);
		return (rdonly ? 0 : EACCES);
	}
	return (EACCES);
}
EXPORT_SYMBOL(zfs_check_global_label);
#endif /* HAVE_MLSLABEL */
int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint64_t fsid;
	uint32_t bshift;

	ZFS_ENTER(zsb);

	dmu_objset_space(zsb->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	fsid = dmu_objset_fsid_guid(zsb->z_os);
	/*
	 * The underlying storage pool actually uses multiple block
	 * sizes.  Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size.  Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably.  Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
	 */
	statp->f_frsize = zsb->z_max_blksz;
	statp->f_bsize = zsb->z_max_blksz;
	bshift = fls(statp->f_bsize) - 1;

	/*
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "preferred" size.
	 */
	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata.  ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects available
	 * and the number of blocks (each object will take at least a block).
	 */
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = (uint32_t)fsid;
	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = ZFS_MAXNAMELEN;

	/*
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	 */
	bzero(statp->f_spare, sizeof (statp->f_spare));

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_statvfs);
int
zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
	znode_t *rootzp;
	int error;

	ZFS_ENTER(zsb);

	error = zfs_zget(zsb, zsb->z_root, &rootzp);
	if (error == 0)
		*ipp = ZTOI(rootzp);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_root);
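/*
 * When the kernel provides a per-super block shrinker (HAVE_SHRINK),
 * invoke it directly to prune this filesystem's dentry and inode
 * caches, reporting the number of objects freed via 'objects'.
 */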
#ifdef HAVE_SHRINK
int
zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};

	ZFS_ENTER(zsb);
	*objects = (*shrinker->shrink)(shrinker, &sc);
	ZFS_EXIT(zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_sb_prune);
#endif /* HAVE_SHRINK */
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t *zp;

	rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
	}

	/*
	 * Drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled only then can it be safely destroyed.
	 */
	taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));

	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}

	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		return (EIO);
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		if (zp->z_sa_hdl) {
			ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
			zfs_znode_dmu_fini(zp);
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	/*
	 * Evict cached data.
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
	    !zfs_is_readonly(zsb))
		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	(void) dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
EXPORT_SYMBOL(zfs_sb_teardown);
#if defined(HAVE_BDI) && !defined(HAVE_BDI_SETUP_AND_REGISTER)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif /* HAVE_BDI && !HAVE_BDI_SETUP_AND_REGISTER */
int
zfs_domount(struct super_block *sb, void *data, int silent)
{
	zpl_mount_data_t *zmd = data;
	const char *osname = zmd->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);

#ifdef HAVE_BDI
	/*
	 * 2.6.32 API change,
	 * Added backing_device_info (BDI) per super block interfaces.  A BDI
	 * must be configured when using a non-device backed filesystem for
	 * proper writeback.  This is not required for older pdflush kernels.
	 *
	 * NOTE: Linux read-ahead is disabled in favor of zfs read-ahead.
	 */
	zsb->z_bdi.ra_pages = 0;
	sb->s_bdi = &zsb->z_bdi;

	error = -bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
	if (error)
		goto out;
#endif /* HAVE_BDI */

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname, "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	} else {
		error = zfs_sb_setup(zsb, B_TRUE);
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = ENOMEM;
		goto out;
	}

	if (!zsb->z_issnap)
		zfsctl_create(zsb);
out:
	if (error) {
		dmu_objset_disown(zsb->z_os, zsb);
		zfs_sb_free(zsb);
	}

	return (error);
}
EXPORT_SYMBOL(zfs_domount);
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	if (zsb != NULL && zsb->z_ctldir != NULL)
		zfsctl_destroy(zsb);
}
EXPORT_SYMBOL(zfs_preumount);
/*
 * Called once all other unmount-related tear down has occurred.
 * It is our responsibility to release any remaining infrastructure.
 */
int
zfs_umount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	objset_t *os;

	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
	os = zsb->z_os;

#ifdef HAVE_BDI
	bdi_destroy(sb->s_bdi);
#endif /* HAVE_BDI */

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zsb.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_disown(os, zsb);
	}

	zfs_sb_free(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_umount);
int
zfs_remount(struct super_block *sb, int *flags, char *data)
{
	/*
	 * All namespace flags (MNT_*) and super block flags (MS_*) will
	 * be handled by the Linux VFS.  Only handle custom options here.
	 */
	return (0);
}
EXPORT_SYMBOL(zfs_remount);
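/*
 * Decode an NFS file handle and return the corresponding inode.
 * Short FIDs carry the object number and generation; long FIDs also
 * carry an objset id and generation so that objects in snapshots
 * under .zfs can be resolved.
 */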
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	znode_t *zp;
	uint64_t object = 0;
	uint64_t fid_gen = 0;
	uint64_t gen_mask;
	uint64_t zp_gen;
	int i, err;

	*ipp = NULL;

	ZFS_ENTER(zsb);

	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		ZFS_EXIT(zsb);

		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
		if (err)
			return (EINVAL);

		ZFS_ENTER(zsb);
	}

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zsb->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);
		} else {
			igrab(*ipp);
		}
		ZFS_EXIT(zsb);
		return (0);
	}

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zsb, object, &zp))) {
		ZFS_EXIT(zsb);
		return (err);
	}
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if (zp_gen == 0)
		zp_gen = 1;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
		iput(ZTOI(zp));
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	*ipp = ZTOI(zp);
	zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_vget);
/*
 * Block out VFS ops and close zfs_sb_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.
 */
int
zfs_suspend_fs(zfs_sb_t *zsb)
{
	int error;

	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
		return (error);
	dmu_objset_disown(zsb->z_os, zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_suspend_fs);
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;

	ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zsb, &zsb->z_os);
	if (err) {
		zsb->z_os = NULL;
	} else {
		uint64_t sa_obj = 0;

		err2 = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj);

		if ((err || err2) && zsb->z_version >= ZPL_VERSION_SA)
			goto bail;

		if ((err = sa_setup(zsb->z_os, sa_obj,
		    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
			goto bail;

		VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
		zsb->z_rollback_time = jiffies;

		/*
		 * Attempt to re-establish all the active inodes with their
		 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
		 * and mark it stale.  This prevents a collision if a new
		 * inode/object is created which must use the same inode
		 * number.  The stale inode will be released when the
		 * VFS prunes the dentry holding the remaining references
		 * on the stale inode.
		 */
		mutex_enter(&zsb->z_znodes_lock);
		for (zp = list_head(&zsb->z_all_znodes); zp;
		    zp = list_next(&zsb->z_all_znodes, zp)) {
			err2 = zfs_rezget(zp);
			if (err2) {
				remove_inode_hash(ZTOI(zp));
				zp->z_is_stale = B_TRUE;
			}
		}
		mutex_exit(&zsb->z_znodes_lock);
	}

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrw_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't reopen zfs_sb_t, force
		 * unmount this file system.
		 */
		(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
EXPORT_SYMBOL(zfs_resume_fs);
int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
	int error;
	objset_t *os = zsb->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (EINVAL);

	if (newvers < zsb->z_version)
		return (EINVAL);

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zsb->z_os)))
		return (ENOTSUP);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);
	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	spa_history_log_internal(LOG_DS_UPGRADE,
	    dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu",
	    zsb->z_version, newvers, dmu_objset_id(os));

	dmu_tx_commit(tx);

	zsb->z_version = newvers;

	if (zsb->z_version >= ZPL_VERSION_FUID)
		zfs_set_fuid_feature(zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_set_version);
/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	const char *pname;
	int error;

	/*
	 * Look up the file system's value for the property.  For the
	 * version property, we look up a slightly different string.
	 */
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
	else
		pname = zfs_prop_to_name(prop);

	error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		default:
			return (error);
		}
		error = 0;
	}
	return (error);
}
EXPORT_SYMBOL(zfs_get_zplprop);
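/*
 * Module init/fini hooks: register the ZPL dataset type with the DMU,
 * expose the filesystem type to the Linux VFS, and hook the superblock
 * pruning callback into the ARC so cached dentries and inodes can be
 * reclaimed under memory pressure.
 */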
void
zfs_init(void)
{
	zfs_znode_init();
	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
	register_filesystem(&zpl_fs_type);
	(void) arc_add_prune_callback(zpl_prune_sbs, NULL);
}

void
zfs_fini(void)
{
	unregister_filesystem(&zpl_fs_type);
	zfs_znode_fini();
}