4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
24 * All rights reserved.
25 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
26 * Copyright (c) 2014 Integros [integros.com]
27 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
30 /* Portions Copyright 2010 Robert Milkowski */
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/sysmacros.h>
39 #include <sys/vnode.h>
41 #include <sys/mntent.h>
42 #include <sys/mount.h>
43 #include <sys/cmn_err.h>
44 #include <sys/zfs_znode.h>
45 #include <sys/zfs_vnops.h>
46 #include <sys/zfs_dir.h>
48 #include <sys/fs/zfs.h>
50 #include <sys/dsl_prop.h>
51 #include <sys/dsl_dataset.h>
52 #include <sys/dsl_deleg.h>
56 #include <sys/sa_impl.h>
57 #include <sys/policy.h>
58 #include <sys/atomic.h>
59 #include <sys/zfs_ioctl.h>
60 #include <sys/zfs_ctldir.h>
61 #include <sys/zfs_fuid.h>
62 #include <sys/sunddi.h>
63 #include <sys/dmu_objset.h>
64 #include <sys/dsl_dir.h>
66 #include <ufs/ufs/quota.h>
67 #include <sys/zfs_quota.h>
69 #include "zfs_comutil.h"
71 #ifndef MNTK_VMSETSIZE_BUG
72 #define MNTK_VMSETSIZE_BUG 0
75 #define MNTK_NOMSYNC 8
78 struct mtx zfs_debug_mtx
;
79 MTX_SYSINIT(zfs_debug_mtx
, &zfs_debug_mtx
, "zfs_debug", MTX_DEF
);
81 SYSCTL_NODE(_vfs
, OID_AUTO
, zfs
, CTLFLAG_RW
, 0, "ZFS file system");
84 SYSCTL_INT(_vfs_zfs
, OID_AUTO
, super_owner
, CTLFLAG_RW
, &zfs_super_owner
, 0,
85 "File system owners can perform privileged operation on file systems");
88 SYSCTL_INT(_vfs_zfs
, OID_AUTO
, debug
, CTLFLAG_RWTUN
, &zfs_debug_level
, 0,
91 SYSCTL_NODE(_vfs_zfs
, OID_AUTO
, version
, CTLFLAG_RD
, 0, "ZFS versions");
92 static int zfs_version_acl
= ZFS_ACL_VERSION
;
93 SYSCTL_INT(_vfs_zfs_version
, OID_AUTO
, acl
, CTLFLAG_RD
, &zfs_version_acl
, 0,
95 static int zfs_version_spa
= SPA_VERSION
;
96 SYSCTL_INT(_vfs_zfs_version
, OID_AUTO
, spa
, CTLFLAG_RD
, &zfs_version_spa
, 0,
98 static int zfs_version_zpl
= ZPL_VERSION
;
99 SYSCTL_INT(_vfs_zfs_version
, OID_AUTO
, zpl
, CTLFLAG_RD
, &zfs_version_zpl
, 0,
102 #if __FreeBSD_version >= 1400018
103 static int zfs_quotactl(vfs_t
*vfsp
, int cmds
, uid_t id
, void *arg
,
106 static int zfs_quotactl(vfs_t
*vfsp
, int cmds
, uid_t id
, void *arg
);
108 static int zfs_mount(vfs_t
*vfsp
);
109 static int zfs_umount(vfs_t
*vfsp
, int fflag
);
110 static int zfs_root(vfs_t
*vfsp
, int flags
, vnode_t
**vpp
);
111 static int zfs_statfs(vfs_t
*vfsp
, struct statfs
*statp
);
112 static int zfs_vget(vfs_t
*vfsp
, ino_t ino
, int flags
, vnode_t
**vpp
);
113 static int zfs_sync(vfs_t
*vfsp
, int waitfor
);
114 #if __FreeBSD_version >= 1300098
115 static int zfs_checkexp(vfs_t
*vfsp
, struct sockaddr
*nam
, uint64_t *extflagsp
,
116 struct ucred
**credanonp
, int *numsecflavors
, int *secflavors
);
118 static int zfs_checkexp(vfs_t
*vfsp
, struct sockaddr
*nam
, int *extflagsp
,
119 struct ucred
**credanonp
, int *numsecflavors
, int **secflavors
);
121 static int zfs_fhtovp(vfs_t
*vfsp
, fid_t
*fidp
, int flags
, vnode_t
**vpp
);
122 static void zfs_freevfs(vfs_t
*vfsp
);
124 struct vfsops zfs_vfsops
= {
125 .vfs_mount
= zfs_mount
,
126 .vfs_unmount
= zfs_umount
,
127 #if __FreeBSD_version >= 1300049
128 .vfs_root
= vfs_cache_root
,
129 .vfs_cachedroot
= zfs_root
,
131 .vfs_root
= zfs_root
,
133 .vfs_statfs
= zfs_statfs
,
134 .vfs_vget
= zfs_vget
,
135 .vfs_sync
= zfs_sync
,
136 .vfs_checkexp
= zfs_checkexp
,
137 .vfs_fhtovp
= zfs_fhtovp
,
138 .vfs_quotactl
= zfs_quotactl
,
141 VFS_SET(zfs_vfsops
, zfs
, VFCF_JAIL
| VFCF_DELEGADMIN
);
144 * We need to keep a count of active fs's.
145 * This is necessary to prevent our module
146 * from being unloaded after a umount -f
148 static uint32_t zfs_active_fs_count
= 0;
151 zfs_get_temporary_prop(dsl_dataset_t
*ds
, zfs_prop_t zfs_prop
, uint64_t *val
,
160 error
= dmu_objset_from_ds(ds
, &os
);
164 error
= getzfsvfs_impl(os
, &zfvp
);
172 if (vfs_optionisset(vfsp
, MNTOPT_NOATIME
, NULL
))
174 if (vfs_optionisset(vfsp
, MNTOPT_ATIME
, NULL
))
177 case ZFS_PROP_DEVICES
:
178 if (vfs_optionisset(vfsp
, MNTOPT_NODEVICES
, NULL
))
180 if (vfs_optionisset(vfsp
, MNTOPT_DEVICES
, NULL
))
184 if (vfs_optionisset(vfsp
, MNTOPT_NOEXEC
, NULL
))
186 if (vfs_optionisset(vfsp
, MNTOPT_EXEC
, NULL
))
189 case ZFS_PROP_SETUID
:
190 if (vfs_optionisset(vfsp
, MNTOPT_NOSETUID
, NULL
))
192 if (vfs_optionisset(vfsp
, MNTOPT_SETUID
, NULL
))
195 case ZFS_PROP_READONLY
:
196 if (vfs_optionisset(vfsp
, MNTOPT_RW
, NULL
))
198 if (vfs_optionisset(vfsp
, MNTOPT_RO
, NULL
))
202 if (zfvp
->z_flags
& ZSB_XATTR
)
205 case ZFS_PROP_NBMAND
:
206 if (vfs_optionisset(vfsp
, MNTOPT_NONBMAND
, NULL
))
208 if (vfs_optionisset(vfsp
, MNTOPT_NBMAND
, NULL
))
218 (void) strcpy(setpoint
, "temporary");
225 zfs_getquota(zfsvfs_t
*zfsvfs
, uid_t id
, int isgroup
, struct dqblk64
*dqp
)
229 uint64_t usedobj
, quotaobj
;
230 uint64_t quota
, used
= 0;
233 usedobj
= isgroup
? DMU_GROUPUSED_OBJECT
: DMU_USERUSED_OBJECT
;
234 quotaobj
= isgroup
? zfsvfs
->z_groupquota_obj
: zfsvfs
->z_userquota_obj
;
236 if (quotaobj
== 0 || zfsvfs
->z_replay
) {
240 (void) sprintf(buf
, "%llx", (longlong_t
)id
);
241 if ((error
= zap_lookup(zfsvfs
->z_os
, quotaobj
,
242 buf
, sizeof (quota
), 1, "a
)) != 0) {
243 dprintf("%s(%d): quotaobj lookup failed\n",
244 __FUNCTION__
, __LINE__
);
248 * quota(8) uses bsoftlimit as "quota", and hardlimit as "limit".
249 * So we set them to be the same.
251 dqp
->dqb_bsoftlimit
= dqp
->dqb_bhardlimit
= btodb(quota
);
252 error
= zap_lookup(zfsvfs
->z_os
, usedobj
, buf
, sizeof (used
), 1, &used
);
253 if (error
&& error
!= ENOENT
) {
254 dprintf("%s(%d): usedobj failed; %d\n",
255 __FUNCTION__
, __LINE__
, error
);
258 dqp
->dqb_curblocks
= btodb(used
);
259 dqp
->dqb_ihardlimit
= dqp
->dqb_isoftlimit
= 0;
262 * Setting this to 0 causes FreeBSD quota(8) to print
263 * the number of days since the epoch, which isn't
264 * particularly useful.
266 dqp
->dqb_btime
= dqp
->dqb_itime
= now
.tv_sec
;
272 #if __FreeBSD_version >= 1400018
273 zfs_quotactl(vfs_t
*vfsp
, int cmds
, uid_t id
, void *arg
, bool *mp_busy
)
275 zfs_quotactl(vfs_t
*vfsp
, int cmds
, uid_t id
, void *arg
)
278 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
280 int cmd
, type
, error
= 0;
282 zfs_userquota_prop_t quota_type
;
283 struct dqblk64 dqblk
= { 0 };
286 cmd
= cmds
>> SUBCMDSHIFT
;
287 type
= cmds
& SUBCMDMASK
;
293 id
= td
->td_ucred
->cr_ruid
;
296 id
= td
->td_ucred
->cr_rgid
;
300 #if __FreeBSD_version < 1400018
301 if (cmd
== Q_QUOTAON
|| cmd
== Q_QUOTAOFF
)
310 * ZFS_PROP_USERQUOTA,
311 * ZFS_PROP_GROUPUSED,
312 * ZFS_PROP_GROUPQUOTA
317 if (type
== USRQUOTA
)
318 quota_type
= ZFS_PROP_USERQUOTA
;
319 else if (type
== GRPQUOTA
)
320 quota_type
= ZFS_PROP_GROUPQUOTA
;
326 if (type
== USRQUOTA
)
327 quota_type
= ZFS_PROP_USERUSED
;
328 else if (type
== GRPQUOTA
)
329 quota_type
= ZFS_PROP_GROUPUSED
;
336 * Depending on the cmd, we may need to get
337 * the ruid and domain (see fuidstr_to_sid?),
338 * the fuid (how?), or other information.
339 * Create fuid using zfs_fuid_create(zfsvfs, id,
340 * ZFS_OWNER or ZFS_GROUP, cr, &fuidp)?
341 * I think I can use just the id?
343 * Look at zfs_id_overquota() to look up a quota.
344 * zap_lookup(something, quotaobj, fuidstring,
345 * sizeof (long long), 1, &quota)
347 * See zfs_set_userquota() to set a quota.
349 if ((uint32_t)type
>= MAXQUOTAS
) {
357 error
= copyout(&bitsize
, arg
, sizeof (int));
360 // As far as I can tell, you can't turn quotas on or off on zfs
362 #if __FreeBSD_version < 1400018
368 #if __FreeBSD_version < 1400018
373 error
= copyin(arg
, &dqblk
, sizeof (dqblk
));
375 error
= zfs_set_userquota(zfsvfs
, quota_type
,
376 "", id
, dbtob(dqblk
.dqb_bhardlimit
));
379 error
= zfs_getquota(zfsvfs
, id
, type
== GRPQUOTA
, &dqblk
);
381 error
= copyout(&dqblk
, arg
, sizeof (dqblk
));
394 zfs_is_readonly(zfsvfs_t
*zfsvfs
)
396 return (!!(zfsvfs
->z_vfs
->vfs_flag
& VFS_RDONLY
));
400 zfs_sync(vfs_t
*vfsp
, int waitfor
)
404 * Data integrity is job one. We don't want a compromised kernel
405 * writing to the storage pool, so we never sync during panic.
411 * Ignore the system syncher. ZFS already commits async data
412 * at zfs_txg_timeout intervals.
414 if (waitfor
== MNT_LAZY
)
419 * Sync a specific filesystem.
421 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
425 error
= vfs_stdsync(vfsp
, waitfor
);
430 dp
= dmu_objset_pool(zfsvfs
->z_os
);
433 * If the system is shutting down, then skip any
434 * filesystems which may exist on a suspended pool.
436 if (rebooting
&& spa_suspended(dp
->dp_spa
)) {
441 if (zfsvfs
->z_log
!= NULL
)
442 zil_commit(zfsvfs
->z_log
, 0);
447 * Sync all ZFS filesystems. This is what happens when you
448 * run sync(8). Unlike other filesystems, ZFS honors the
449 * request by waiting for all pools to commit all dirty data.
458 atime_changed_cb(void *arg
, uint64_t newval
)
460 zfsvfs_t
*zfsvfs
= arg
;
462 if (newval
== TRUE
) {
463 zfsvfs
->z_atime
= TRUE
;
464 zfsvfs
->z_vfs
->vfs_flag
&= ~MNT_NOATIME
;
465 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_NOATIME
);
466 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_ATIME
, NULL
, 0);
468 zfsvfs
->z_atime
= FALSE
;
469 zfsvfs
->z_vfs
->vfs_flag
|= MNT_NOATIME
;
470 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_ATIME
);
471 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_NOATIME
, NULL
, 0);
476 xattr_changed_cb(void *arg
, uint64_t newval
)
478 zfsvfs_t
*zfsvfs
= arg
;
480 if (newval
== ZFS_XATTR_OFF
) {
481 zfsvfs
->z_flags
&= ~ZSB_XATTR
;
483 zfsvfs
->z_flags
|= ZSB_XATTR
;
485 if (newval
== ZFS_XATTR_SA
)
486 zfsvfs
->z_xattr_sa
= B_TRUE
;
488 zfsvfs
->z_xattr_sa
= B_FALSE
;
493 blksz_changed_cb(void *arg
, uint64_t newval
)
495 zfsvfs_t
*zfsvfs
= arg
;
496 ASSERT3U(newval
, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs
->z_os
)));
497 ASSERT3U(newval
, >=, SPA_MINBLOCKSIZE
);
498 ASSERT(ISP2(newval
));
500 zfsvfs
->z_max_blksz
= newval
;
501 zfsvfs
->z_vfs
->mnt_stat
.f_iosize
= newval
;
505 readonly_changed_cb(void *arg
, uint64_t newval
)
507 zfsvfs_t
*zfsvfs
= arg
;
510 /* XXX locking on vfs_flag? */
511 zfsvfs
->z_vfs
->vfs_flag
|= VFS_RDONLY
;
512 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_RW
);
513 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_RO
, NULL
, 0);
515 /* XXX locking on vfs_flag? */
516 zfsvfs
->z_vfs
->vfs_flag
&= ~VFS_RDONLY
;
517 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_RO
);
518 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_RW
, NULL
, 0);
523 setuid_changed_cb(void *arg
, uint64_t newval
)
525 zfsvfs_t
*zfsvfs
= arg
;
527 if (newval
== FALSE
) {
528 zfsvfs
->z_vfs
->vfs_flag
|= VFS_NOSETUID
;
529 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_SETUID
);
530 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_NOSETUID
, NULL
, 0);
532 zfsvfs
->z_vfs
->vfs_flag
&= ~VFS_NOSETUID
;
533 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_NOSETUID
);
534 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_SETUID
, NULL
, 0);
539 exec_changed_cb(void *arg
, uint64_t newval
)
541 zfsvfs_t
*zfsvfs
= arg
;
543 if (newval
== FALSE
) {
544 zfsvfs
->z_vfs
->vfs_flag
|= VFS_NOEXEC
;
545 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_EXEC
);
546 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_NOEXEC
, NULL
, 0);
548 zfsvfs
->z_vfs
->vfs_flag
&= ~VFS_NOEXEC
;
549 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_NOEXEC
);
550 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_EXEC
, NULL
, 0);
555 * The nbmand mount option can be changed at mount time.
556 * We can't allow it to be toggled on live file systems or incorrect
557 * behavior may be seen from cifs clients.
559 * This property isn't registered via dsl_prop_register(), but this callback
560 * will be called when a file system is first mounted
563 nbmand_changed_cb(void *arg
, uint64_t newval
)
565 zfsvfs_t
*zfsvfs
= arg
;
566 if (newval
== FALSE
) {
567 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_NBMAND
);
568 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_NONBMAND
, NULL
, 0);
570 vfs_clearmntopt(zfsvfs
->z_vfs
, MNTOPT_NONBMAND
);
571 vfs_setmntopt(zfsvfs
->z_vfs
, MNTOPT_NBMAND
, NULL
, 0);
576 snapdir_changed_cb(void *arg
, uint64_t newval
)
578 zfsvfs_t
*zfsvfs
= arg
;
580 zfsvfs
->z_show_ctldir
= newval
;
584 acl_mode_changed_cb(void *arg
, uint64_t newval
)
586 zfsvfs_t
*zfsvfs
= arg
;
588 zfsvfs
->z_acl_mode
= newval
;
592 acl_inherit_changed_cb(void *arg
, uint64_t newval
)
594 zfsvfs_t
*zfsvfs
= arg
;
596 zfsvfs
->z_acl_inherit
= newval
;
600 acl_type_changed_cb(void *arg
, uint64_t newval
)
602 zfsvfs_t
*zfsvfs
= arg
;
604 zfsvfs
->z_acl_type
= newval
;
608 zfs_register_callbacks(vfs_t
*vfsp
)
610 struct dsl_dataset
*ds
= NULL
;
612 zfsvfs_t
*zfsvfs
= NULL
;
614 boolean_t readonly
= B_FALSE
;
615 boolean_t do_readonly
= B_FALSE
;
616 boolean_t setuid
= B_FALSE
;
617 boolean_t do_setuid
= B_FALSE
;
618 boolean_t exec
= B_FALSE
;
619 boolean_t do_exec
= B_FALSE
;
620 boolean_t xattr
= B_FALSE
;
621 boolean_t atime
= B_FALSE
;
622 boolean_t do_atime
= B_FALSE
;
623 boolean_t do_xattr
= B_FALSE
;
626 ASSERT3P(vfsp
, !=, NULL
);
627 zfsvfs
= vfsp
->vfs_data
;
628 ASSERT3P(zfsvfs
, !=, NULL
);
632 * This function can be called for a snapshot when we update snapshot's
633 * mount point, which isn't really supported.
635 if (dmu_objset_is_snapshot(os
))
639 * The act of registering our callbacks will destroy any mount
640 * options we may have. In order to enable temporary overrides
641 * of mount options, we stash away the current values and
642 * restore them after we register the callbacks.
644 if (vfs_optionisset(vfsp
, MNTOPT_RO
, NULL
) ||
645 !spa_writeable(dmu_objset_spa(os
))) {
647 do_readonly
= B_TRUE
;
648 } else if (vfs_optionisset(vfsp
, MNTOPT_RW
, NULL
)) {
650 do_readonly
= B_TRUE
;
652 if (vfs_optionisset(vfsp
, MNTOPT_NOSETUID
, NULL
)) {
655 } else if (vfs_optionisset(vfsp
, MNTOPT_SETUID
, NULL
)) {
659 if (vfs_optionisset(vfsp
, MNTOPT_NOEXEC
, NULL
)) {
662 } else if (vfs_optionisset(vfsp
, MNTOPT_EXEC
, NULL
)) {
666 if (vfs_optionisset(vfsp
, MNTOPT_NOXATTR
, NULL
)) {
667 zfsvfs
->z_xattr
= xattr
= ZFS_XATTR_OFF
;
669 } else if (vfs_optionisset(vfsp
, MNTOPT_XATTR
, NULL
)) {
670 zfsvfs
->z_xattr
= xattr
= ZFS_XATTR_DIR
;
672 } else if (vfs_optionisset(vfsp
, MNTOPT_DIRXATTR
, NULL
)) {
673 zfsvfs
->z_xattr
= xattr
= ZFS_XATTR_DIR
;
675 } else if (vfs_optionisset(vfsp
, MNTOPT_SAXATTR
, NULL
)) {
676 zfsvfs
->z_xattr
= xattr
= ZFS_XATTR_SA
;
679 if (vfs_optionisset(vfsp
, MNTOPT_NOATIME
, NULL
)) {
682 } else if (vfs_optionisset(vfsp
, MNTOPT_ATIME
, NULL
)) {
688 * We need to enter pool configuration here, so that we can use
689 * dsl_prop_get_int_ds() to handle the special nbmand property below.
690 * dsl_prop_get_integer() can not be used, because it has to acquire
691 * spa_namespace_lock and we can not do that because we already hold
692 * z_teardown_lock. The problem is that spa_write_cachefile() is called
693 * with spa_namespace_lock held and the function calls ZFS vnode
694 * operations to write the cache file and thus z_teardown_lock is
695 * acquired after spa_namespace_lock.
697 ds
= dmu_objset_ds(os
);
698 dsl_pool_config_enter(dmu_objset_pool(os
), FTAG
);
701 * nbmand is a special property. It can only be changed at
704 * This is weird, but it is documented to only be changeable
707 if (vfs_optionisset(vfsp
, MNTOPT_NONBMAND
, NULL
)) {
709 } else if (vfs_optionisset(vfsp
, MNTOPT_NBMAND
, NULL
)) {
711 } else if ((error
= dsl_prop_get_int_ds(ds
, "nbmand", &nbmand
) != 0)) {
712 dsl_pool_config_exit(dmu_objset_pool(os
), FTAG
);
717 * Register property callbacks.
719 * It would probably be fine to just check for i/o error from
720 * the first prop_register(), but I guess I like to go
723 error
= dsl_prop_register(ds
,
724 zfs_prop_to_name(ZFS_PROP_ATIME
), atime_changed_cb
, zfsvfs
);
725 error
= error
? error
: dsl_prop_register(ds
,
726 zfs_prop_to_name(ZFS_PROP_XATTR
), xattr_changed_cb
, zfsvfs
);
727 error
= error
? error
: dsl_prop_register(ds
,
728 zfs_prop_to_name(ZFS_PROP_RECORDSIZE
), blksz_changed_cb
, zfsvfs
);
729 error
= error
? error
: dsl_prop_register(ds
,
730 zfs_prop_to_name(ZFS_PROP_READONLY
), readonly_changed_cb
, zfsvfs
);
731 error
= error
? error
: dsl_prop_register(ds
,
732 zfs_prop_to_name(ZFS_PROP_SETUID
), setuid_changed_cb
, zfsvfs
);
733 error
= error
? error
: dsl_prop_register(ds
,
734 zfs_prop_to_name(ZFS_PROP_EXEC
), exec_changed_cb
, zfsvfs
);
735 error
= error
? error
: dsl_prop_register(ds
,
736 zfs_prop_to_name(ZFS_PROP_SNAPDIR
), snapdir_changed_cb
, zfsvfs
);
737 error
= error
? error
: dsl_prop_register(ds
,
738 zfs_prop_to_name(ZFS_PROP_ACLTYPE
), acl_type_changed_cb
, zfsvfs
);
739 error
= error
? error
: dsl_prop_register(ds
,
740 zfs_prop_to_name(ZFS_PROP_ACLMODE
), acl_mode_changed_cb
, zfsvfs
);
741 error
= error
? error
: dsl_prop_register(ds
,
742 zfs_prop_to_name(ZFS_PROP_ACLINHERIT
), acl_inherit_changed_cb
,
744 dsl_pool_config_exit(dmu_objset_pool(os
), FTAG
);
749 * Invoke our callbacks to restore temporary mount options.
752 readonly_changed_cb(zfsvfs
, readonly
);
754 setuid_changed_cb(zfsvfs
, setuid
);
756 exec_changed_cb(zfsvfs
, exec
);
758 xattr_changed_cb(zfsvfs
, xattr
);
760 atime_changed_cb(zfsvfs
, atime
);
762 nbmand_changed_cb(zfsvfs
, nbmand
);
767 dsl_prop_unregister_all(ds
, zfsvfs
);
772 * Associate this zfsvfs with the given objset, which must be owned.
773 * This will cache a bunch of on-disk state from the objset in the
777 zfsvfs_init(zfsvfs_t
*zfsvfs
, objset_t
*os
)
782 zfsvfs
->z_max_blksz
= SPA_OLD_MAXBLOCKSIZE
;
783 zfsvfs
->z_show_ctldir
= ZFS_SNAPDIR_VISIBLE
;
786 error
= zfs_get_zplprop(os
, ZFS_PROP_VERSION
, &zfsvfs
->z_version
);
789 if (zfsvfs
->z_version
>
790 zfs_zpl_version_map(spa_version(dmu_objset_spa(os
)))) {
791 (void) printf("Can't mount a version %lld file system "
792 "on a version %lld pool\n. Pool must be upgraded to mount "
793 "this file system.", (u_longlong_t
)zfsvfs
->z_version
,
794 (u_longlong_t
)spa_version(dmu_objset_spa(os
)));
795 return (SET_ERROR(ENOTSUP
));
797 error
= zfs_get_zplprop(os
, ZFS_PROP_NORMALIZE
, &val
);
800 zfsvfs
->z_norm
= (int)val
;
802 error
= zfs_get_zplprop(os
, ZFS_PROP_UTF8ONLY
, &val
);
805 zfsvfs
->z_utf8
= (val
!= 0);
807 error
= zfs_get_zplprop(os
, ZFS_PROP_CASE
, &val
);
810 zfsvfs
->z_case
= (uint_t
)val
;
812 error
= zfs_get_zplprop(os
, ZFS_PROP_ACLTYPE
, &val
);
815 zfsvfs
->z_acl_type
= (uint_t
)val
;
818 * Fold case on file systems that are always or sometimes case
821 if (zfsvfs
->z_case
== ZFS_CASE_INSENSITIVE
||
822 zfsvfs
->z_case
== ZFS_CASE_MIXED
)
823 zfsvfs
->z_norm
|= U8_TEXTPREP_TOUPPER
;
825 zfsvfs
->z_use_fuids
= USE_FUIDS(zfsvfs
->z_version
, zfsvfs
->z_os
);
826 zfsvfs
->z_use_sa
= USE_SA(zfsvfs
->z_version
, zfsvfs
->z_os
);
829 if (zfsvfs
->z_use_sa
) {
830 /* should either have both of these objects or none */
831 error
= zap_lookup(os
, MASTER_NODE_OBJ
, ZFS_SA_ATTRS
, 8, 1,
836 error
= zfs_get_zplprop(os
, ZFS_PROP_XATTR
, &val
);
837 if (error
== 0 && val
== ZFS_XATTR_SA
)
838 zfsvfs
->z_xattr_sa
= B_TRUE
;
841 error
= sa_setup(os
, sa_obj
, zfs_attr_table
, ZPL_END
,
842 &zfsvfs
->z_attr_table
);
846 if (zfsvfs
->z_version
>= ZPL_VERSION_SA
)
847 sa_register_update_callback(os
, zfs_sa_upgrade
);
849 error
= zap_lookup(os
, MASTER_NODE_OBJ
, ZFS_ROOT_OBJ
, 8, 1,
853 ASSERT3U(zfsvfs
->z_root
, !=, 0);
855 error
= zap_lookup(os
, MASTER_NODE_OBJ
, ZFS_UNLINKED_SET
, 8, 1,
856 &zfsvfs
->z_unlinkedobj
);
860 error
= zap_lookup(os
, MASTER_NODE_OBJ
,
861 zfs_userquota_prop_prefixes
[ZFS_PROP_USERQUOTA
],
862 8, 1, &zfsvfs
->z_userquota_obj
);
864 zfsvfs
->z_userquota_obj
= 0;
868 error
= zap_lookup(os
, MASTER_NODE_OBJ
,
869 zfs_userquota_prop_prefixes
[ZFS_PROP_GROUPQUOTA
],
870 8, 1, &zfsvfs
->z_groupquota_obj
);
872 zfsvfs
->z_groupquota_obj
= 0;
876 error
= zap_lookup(os
, MASTER_NODE_OBJ
,
877 zfs_userquota_prop_prefixes
[ZFS_PROP_PROJECTQUOTA
],
878 8, 1, &zfsvfs
->z_projectquota_obj
);
880 zfsvfs
->z_projectquota_obj
= 0;
884 error
= zap_lookup(os
, MASTER_NODE_OBJ
,
885 zfs_userquota_prop_prefixes
[ZFS_PROP_USEROBJQUOTA
],
886 8, 1, &zfsvfs
->z_userobjquota_obj
);
888 zfsvfs
->z_userobjquota_obj
= 0;
892 error
= zap_lookup(os
, MASTER_NODE_OBJ
,
893 zfs_userquota_prop_prefixes
[ZFS_PROP_GROUPOBJQUOTA
],
894 8, 1, &zfsvfs
->z_groupobjquota_obj
);
896 zfsvfs
->z_groupobjquota_obj
= 0;
900 error
= zap_lookup(os
, MASTER_NODE_OBJ
,
901 zfs_userquota_prop_prefixes
[ZFS_PROP_PROJECTOBJQUOTA
],
902 8, 1, &zfsvfs
->z_projectobjquota_obj
);
904 zfsvfs
->z_projectobjquota_obj
= 0;
908 error
= zap_lookup(os
, MASTER_NODE_OBJ
, ZFS_FUID_TABLES
, 8, 1,
909 &zfsvfs
->z_fuid_obj
);
911 zfsvfs
->z_fuid_obj
= 0;
915 error
= zap_lookup(os
, MASTER_NODE_OBJ
, ZFS_SHARES_DIR
, 8, 1,
916 &zfsvfs
->z_shares_dir
);
918 zfsvfs
->z_shares_dir
= 0;
923 * Only use the name cache if we are looking for a
924 * name on a file system that does not require normalization
925 * or case folding. We can also look there if we happen to be
926 * on a non-normalizing, mixed sensitivity file system IF we
927 * are looking for the exact name (which is always the case on
930 zfsvfs
->z_use_namecache
= !zfsvfs
->z_norm
||
931 ((zfsvfs
->z_case
== ZFS_CASE_MIXED
) &&
932 !(zfsvfs
->z_norm
& ~U8_TEXTPREP_TOUPPER
));
937 taskq_t
*zfsvfs_taskq
;
940 zfsvfs_task_unlinked_drain(void *context
, int pending __unused
)
943 zfs_unlinked_drain((zfsvfs_t
*)context
);
947 zfsvfs_create(const char *osname
, boolean_t readonly
, zfsvfs_t
**zfvp
)
952 boolean_t ro
= (readonly
|| (strchr(osname
, '@') != NULL
));
955 * XXX: Fix struct statfs so this isn't necessary!
957 * The 'osname' is used as the filesystem's special node, which means
958 * it must fit in statfs.f_mntfromname, or else it can't be
959 * enumerated, so libzfs_mnttab_find() returns NULL, which causes
960 * 'zfs unmount' to think it's not mounted when it is.
962 if (strlen(osname
) >= MNAMELEN
)
963 return (SET_ERROR(ENAMETOOLONG
));
965 zfsvfs
= kmem_zalloc(sizeof (zfsvfs_t
), KM_SLEEP
);
967 error
= dmu_objset_own(osname
, DMU_OST_ZFS
, ro
, B_TRUE
, zfsvfs
,
970 kmem_free(zfsvfs
, sizeof (zfsvfs_t
));
974 error
= zfsvfs_create_impl(zfvp
, zfsvfs
, os
);
981 zfsvfs_create_impl(zfsvfs_t
**zfvp
, zfsvfs_t
*zfsvfs
, objset_t
*os
)
985 zfsvfs
->z_vfs
= NULL
;
986 zfsvfs
->z_parent
= zfsvfs
;
988 mutex_init(&zfsvfs
->z_znodes_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
989 mutex_init(&zfsvfs
->z_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
990 list_create(&zfsvfs
->z_all_znodes
, sizeof (znode_t
),
991 offsetof(znode_t
, z_link_node
));
992 TASK_INIT(&zfsvfs
->z_unlinked_drain_task
, 0,
993 zfsvfs_task_unlinked_drain
, zfsvfs
);
994 ZFS_TEARDOWN_INIT(zfsvfs
);
995 ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs
);
996 rw_init(&zfsvfs
->z_fuid_lock
, NULL
, RW_DEFAULT
, NULL
);
997 for (int i
= 0; i
!= ZFS_OBJ_MTX_SZ
; i
++)
998 mutex_init(&zfsvfs
->z_hold_mtx
[i
], NULL
, MUTEX_DEFAULT
, NULL
);
1000 error
= zfsvfs_init(zfsvfs
, os
);
1002 dmu_objset_disown(os
, B_TRUE
, zfsvfs
);
1004 kmem_free(zfsvfs
, sizeof (zfsvfs_t
));
1013 zfsvfs_setup(zfsvfs_t
*zfsvfs
, boolean_t mounting
)
1018 * Check for a bad on-disk format version now since we
1019 * lied about owning the dataset readonly before.
1021 if (!(zfsvfs
->z_vfs
->vfs_flag
& VFS_RDONLY
) &&
1022 dmu_objset_incompatible_encryption_version(zfsvfs
->z_os
))
1023 return (SET_ERROR(EROFS
));
1025 error
= zfs_register_callbacks(zfsvfs
->z_vfs
);
1030 * If we are not mounting (ie: online recv), then we don't
1031 * have to worry about replaying the log as we blocked all
1032 * operations out since we closed the ZIL.
1037 ASSERT3P(zfsvfs
->z_kstat
.dk_kstats
, ==, NULL
);
1038 error
= dataset_kstats_create(&zfsvfs
->z_kstat
, zfsvfs
->z_os
);
1041 zfsvfs
->z_log
= zil_open(zfsvfs
->z_os
, zfs_get_data
,
1042 &zfsvfs
->z_kstat
.dk_zil_sums
);
1045 * During replay we remove the read only flag to
1046 * allow replays to succeed.
1048 readonly
= zfsvfs
->z_vfs
->vfs_flag
& VFS_RDONLY
;
1049 if (readonly
!= 0) {
1050 zfsvfs
->z_vfs
->vfs_flag
&= ~VFS_RDONLY
;
1055 if (zap_get_stats(zfsvfs
->z_os
, zfsvfs
->z_unlinkedobj
,
1057 dataset_kstats_update_nunlinks_kstat(
1058 &zfsvfs
->z_kstat
, zs
.zs_num_entries
);
1059 dprintf_ds(zfsvfs
->z_os
->os_dsl_dataset
,
1060 "num_entries in unlinked set: %llu",
1061 (u_longlong_t
)zs
.zs_num_entries
);
1064 zfs_unlinked_drain(zfsvfs
);
1065 dd
= zfsvfs
->z_os
->os_dsl_dataset
->ds_dir
;
1066 dd
->dd_activity_cancelled
= B_FALSE
;
1070 * Parse and replay the intent log.
1072 * Because of ziltest, this must be done after
1073 * zfs_unlinked_drain(). (Further note: ziltest
1074 * doesn't use readonly mounts, where
1075 * zfs_unlinked_drain() isn't called.) This is because
1076 * ziltest causes spa_sync() to think it's committed,
1077 * but actually it is not, so the intent log contains
1078 * many txg's worth of changes.
1080 * In particular, if object N is in the unlinked set in
1081 * the last txg to actually sync, then it could be
1082 * actually freed in a later txg and then reallocated
1083 * in a yet later txg. This would write a "create
1084 * object N" record to the intent log. Normally, this
1085 * would be fine because the spa_sync() would have
1086 * written out the fact that object N is free, before
1087 * we could write the "create object N" intent log
1090 * But when we are in ziltest mode, we advance the "open
1091 * txg" without actually spa_sync()-ing the changes to
1092 * disk. So we would see that object N is still
1093 * allocated and in the unlinked set, and there is an
1094 * intent log record saying to allocate it.
1096 if (spa_writeable(dmu_objset_spa(zfsvfs
->z_os
))) {
1097 if (zil_replay_disable
) {
1098 zil_destroy(zfsvfs
->z_log
, B_FALSE
);
1100 boolean_t use_nc
= zfsvfs
->z_use_namecache
;
1101 zfsvfs
->z_use_namecache
= B_FALSE
;
1102 zfsvfs
->z_replay
= B_TRUE
;
1103 zil_replay(zfsvfs
->z_os
, zfsvfs
,
1105 zfsvfs
->z_replay
= B_FALSE
;
1106 zfsvfs
->z_use_namecache
= use_nc
;
1110 /* restore readonly bit */
1112 zfsvfs
->z_vfs
->vfs_flag
|= VFS_RDONLY
;
1114 ASSERT3P(zfsvfs
->z_kstat
.dk_kstats
, !=, NULL
);
1115 zfsvfs
->z_log
= zil_open(zfsvfs
->z_os
, zfs_get_data
,
1116 &zfsvfs
->z_kstat
.dk_zil_sums
);
1120 * Set the objset user_ptr to track its zfsvfs.
1122 mutex_enter(&zfsvfs
->z_os
->os_user_ptr_lock
);
1123 dmu_objset_set_user(zfsvfs
->z_os
, zfsvfs
);
1124 mutex_exit(&zfsvfs
->z_os
->os_user_ptr_lock
);
1130 zfsvfs_free(zfsvfs_t
*zfsvfs
)
1134 zfs_fuid_destroy(zfsvfs
);
1136 mutex_destroy(&zfsvfs
->z_znodes_lock
);
1137 mutex_destroy(&zfsvfs
->z_lock
);
1138 ASSERT3U(zfsvfs
->z_nr_znodes
, ==, 0);
1139 list_destroy(&zfsvfs
->z_all_znodes
);
1140 ZFS_TEARDOWN_DESTROY(zfsvfs
);
1141 ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs
);
1142 rw_destroy(&zfsvfs
->z_fuid_lock
);
1143 for (i
= 0; i
!= ZFS_OBJ_MTX_SZ
; i
++)
1144 mutex_destroy(&zfsvfs
->z_hold_mtx
[i
]);
1145 dataset_kstats_destroy(&zfsvfs
->z_kstat
);
1146 kmem_free(zfsvfs
, sizeof (zfsvfs_t
));
1150 zfs_set_fuid_feature(zfsvfs_t
*zfsvfs
)
1152 zfsvfs
->z_use_fuids
= USE_FUIDS(zfsvfs
->z_version
, zfsvfs
->z_os
);
1153 zfsvfs
->z_use_sa
= USE_SA(zfsvfs
->z_version
, zfsvfs
->z_os
);
1157 zfs_domount(vfs_t
*vfsp
, char *osname
)
1159 uint64_t recordsize
, fsid_guid
;
1163 ASSERT3P(vfsp
, !=, NULL
);
1164 ASSERT3P(osname
, !=, NULL
);
1166 error
= zfsvfs_create(osname
, vfsp
->mnt_flag
& MNT_RDONLY
, &zfsvfs
);
1169 zfsvfs
->z_vfs
= vfsp
;
1171 if ((error
= dsl_prop_get_integer(osname
,
1172 "recordsize", &recordsize
, NULL
)))
1174 zfsvfs
->z_vfs
->vfs_bsize
= SPA_MINBLOCKSIZE
;
1175 zfsvfs
->z_vfs
->mnt_stat
.f_iosize
= recordsize
;
1177 vfsp
->vfs_data
= zfsvfs
;
1178 vfsp
->mnt_flag
|= MNT_LOCAL
;
1179 vfsp
->mnt_kern_flag
|= MNTK_LOOKUP_SHARED
;
1180 vfsp
->mnt_kern_flag
|= MNTK_SHARED_WRITES
;
1181 vfsp
->mnt_kern_flag
|= MNTK_EXTENDED_SHARED
;
1183 * This can cause a loss of coherence between ARC and page cache
1184 * on ZoF - unclear if the problem is in FreeBSD or ZoF
1186 vfsp
->mnt_kern_flag
|= MNTK_NO_IOPF
; /* vn_io_fault can be used */
1187 vfsp
->mnt_kern_flag
|= MNTK_NOMSYNC
;
1188 vfsp
->mnt_kern_flag
|= MNTK_VMSETSIZE_BUG
;
1190 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
1191 vfsp
->mnt_kern_flag
|= MNTK_FPLOOKUP
;
1194 * The fsid is 64 bits, composed of an 8-bit fs type, which
1195 * separates our fsid from any other filesystem types, and a
1196 * 56-bit objset unique ID. The objset unique ID is unique to
1197 * all objsets open on this system, provided by unique_create().
1198 * The 8-bit fs type must be put in the low bits of fsid[1]
1199 * because that's where other Solaris filesystems put it.
1201 fsid_guid
= dmu_objset_fsid_guid(zfsvfs
->z_os
);
1202 ASSERT3U((fsid_guid
& ~((1ULL << 56) - 1)), ==, 0);
1203 vfsp
->vfs_fsid
.val
[0] = fsid_guid
;
1204 vfsp
->vfs_fsid
.val
[1] = ((fsid_guid
>> 32) << 8) |
1205 (vfsp
->mnt_vfc
->vfc_typenum
& 0xFF);
1208 * Set features for file system.
1210 zfs_set_fuid_feature(zfsvfs
);
1212 if (dmu_objset_is_snapshot(zfsvfs
->z_os
)) {
1215 atime_changed_cb(zfsvfs
, B_FALSE
);
1216 readonly_changed_cb(zfsvfs
, B_TRUE
);
1217 if ((error
= dsl_prop_get_integer(osname
,
1218 "xattr", &pval
, NULL
)))
1220 xattr_changed_cb(zfsvfs
, pval
);
1221 if ((error
= dsl_prop_get_integer(osname
,
1222 "acltype", &pval
, NULL
)))
1224 acl_type_changed_cb(zfsvfs
, pval
);
1225 zfsvfs
->z_issnap
= B_TRUE
;
1226 zfsvfs
->z_os
->os_sync
= ZFS_SYNC_DISABLED
;
1228 mutex_enter(&zfsvfs
->z_os
->os_user_ptr_lock
);
1229 dmu_objset_set_user(zfsvfs
->z_os
, zfsvfs
);
1230 mutex_exit(&zfsvfs
->z_os
->os_user_ptr_lock
);
1232 if ((error
= zfsvfs_setup(zfsvfs
, B_TRUE
)))
1236 vfs_mountedfrom(vfsp
, osname
);
1238 if (!zfsvfs
->z_issnap
)
1239 zfsctl_create(zfsvfs
);
1242 dmu_objset_disown(zfsvfs
->z_os
, B_TRUE
, zfsvfs
);
1243 zfsvfs_free(zfsvfs
);
1245 atomic_inc_32(&zfs_active_fs_count
);
1252 zfs_unregister_callbacks(zfsvfs_t
*zfsvfs
)
1254 objset_t
*os
= zfsvfs
->z_os
;
1256 if (!dmu_objset_is_snapshot(os
))
1257 dsl_prop_unregister_all(dmu_objset_ds(os
), zfsvfs
);
1261 getpoolname(const char *osname
, char *poolname
)
1265 p
= strchr(osname
, '/');
1267 if (strlen(osname
) >= MAXNAMELEN
)
1268 return (ENAMETOOLONG
);
1269 (void) strcpy(poolname
, osname
);
1271 if (p
- osname
>= MAXNAMELEN
)
1272 return (ENAMETOOLONG
);
1273 (void) strncpy(poolname
, osname
, p
- osname
);
1274 poolname
[p
- osname
] = '\0';
1280 fetch_osname_options(char *name
, bool *checkpointrewind
)
1283 if (name
[0] == '!') {
1284 *checkpointrewind
= true;
1285 memmove(name
, name
+ 1, strlen(name
));
1287 *checkpointrewind
= false;
1292 zfs_mount(vfs_t
*vfsp
)
1294 kthread_t
*td
= curthread
;
1295 vnode_t
*mvp
= vfsp
->mnt_vnodecovered
;
1296 cred_t
*cr
= td
->td_ucred
;
1300 bool checkpointrewind
;
1302 if (vfs_getopt(vfsp
->mnt_optnew
, "from", (void **)&osname
, NULL
))
1303 return (SET_ERROR(EINVAL
));
1306 * If full-owner-access is enabled and delegated administration is
1307 * turned on, we must set nosuid.
1309 if (zfs_super_owner
&&
1310 dsl_deleg_access(osname
, ZFS_DELEG_PERM_MOUNT
, cr
) != ECANCELED
) {
1311 secpolicy_fs_mount_clearopts(cr
, vfsp
);
1314 fetch_osname_options(osname
, &checkpointrewind
);
1317 * Check for mount privilege?
1319 * If we don't have privilege then see if
1320 * we have local permission to allow it
1322 error
= secpolicy_fs_mount(cr
, mvp
, vfsp
);
1324 if (dsl_deleg_access(osname
, ZFS_DELEG_PERM_MOUNT
, cr
) != 0)
1327 if (!(vfsp
->vfs_flag
& MS_REMOUNT
)) {
1331 * Make sure user is the owner of the mount point
1332 * or has sufficient privileges.
1335 vattr
.va_mask
= AT_UID
;
1337 vn_lock(mvp
, LK_SHARED
| LK_RETRY
);
1338 if (VOP_GETATTR(mvp
, &vattr
, cr
)) {
1343 if (secpolicy_vnode_owner(mvp
, cr
, vattr
.va_uid
) != 0 &&
1344 VOP_ACCESS(mvp
, VWRITE
, cr
, td
) != 0) {
1351 secpolicy_fs_mount_clearopts(cr
, vfsp
);
1355 * Refuse to mount a filesystem if we are in a local zone and the
1356 * dataset is not visible.
1358 if (!INGLOBALZONE(curproc
) &&
1359 (!zone_dataset_visible(osname
, &canwrite
) || !canwrite
)) {
1360 error
= SET_ERROR(EPERM
);
1364 vfsp
->vfs_flag
|= MNT_NFS4ACLS
;
1367 * When doing a remount, we simply refresh our temporary properties
1368 * according to those options set in the current VFS options.
1370 if (vfsp
->vfs_flag
& MS_REMOUNT
) {
1371 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1374 * Refresh mount options with z_teardown_lock blocking I/O while
1375 * the filesystem is in an inconsistent state.
1376 * The lock also serializes this code with filesystem
1377 * manipulations between entry to zfs_suspend_fs() and return
1378 * from zfs_resume_fs().
1380 ZFS_TEARDOWN_ENTER_WRITE(zfsvfs
, FTAG
);
1381 zfs_unregister_callbacks(zfsvfs
);
1382 error
= zfs_register_callbacks(vfsp
);
1383 ZFS_TEARDOWN_EXIT(zfsvfs
, FTAG
);
1387 /* Initial root mount: try hard to import the requested root pool. */
1388 if ((vfsp
->vfs_flag
& MNT_ROOTFS
) != 0 &&
1389 (vfsp
->vfs_flag
& MNT_UPDATE
) == 0) {
1390 char pname
[MAXNAMELEN
];
1392 error
= getpoolname(osname
, pname
);
1394 error
= spa_import_rootpool(pname
, checkpointrewind
);
1399 error
= zfs_domount(vfsp
, osname
);
1407 zfs_statfs(vfs_t
*vfsp
, struct statfs
*statp
)
1409 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1410 uint64_t refdbytes
, availbytes
, usedobjs
, availobjs
;
1412 statp
->f_version
= STATFS_VERSION
;
1416 dmu_objset_space(zfsvfs
->z_os
,
1417 &refdbytes
, &availbytes
, &usedobjs
, &availobjs
);
1420 * The underlying storage pool actually uses multiple block sizes.
1421 * We report the fragsize as the smallest block size we support,
1422 * and we report our blocksize as the filesystem's maximum blocksize.
1424 statp
->f_bsize
= SPA_MINBLOCKSIZE
;
1425 statp
->f_iosize
= zfsvfs
->z_vfs
->mnt_stat
.f_iosize
;
1428 * The following report "total" blocks of various kinds in the
1429 * file system, but reported in terms of f_frsize - the
1433 statp
->f_blocks
= (refdbytes
+ availbytes
) >> SPA_MINBLOCKSHIFT
;
1434 statp
->f_bfree
= availbytes
/ statp
->f_bsize
;
1435 statp
->f_bavail
= statp
->f_bfree
; /* no root reservation */
1438 * statvfs() should really be called statufs(), because it assumes
1439 * static metadata. ZFS doesn't preallocate files, so the best
1440 * we can do is report the max that could possibly fit in f_files,
1441 * and that minus the number actually used in f_ffree.
1442 * For f_ffree, report the smaller of the number of object available
1443 * and the number of blocks (each object will take at least a block).
1445 statp
->f_ffree
= MIN(availobjs
, statp
->f_bfree
);
1446 statp
->f_files
= statp
->f_ffree
+ usedobjs
;
1449 * We're a zfs filesystem.
1451 strlcpy(statp
->f_fstypename
, "zfs",
1452 sizeof (statp
->f_fstypename
));
1454 strlcpy(statp
->f_mntfromname
, vfsp
->mnt_stat
.f_mntfromname
,
1455 sizeof (statp
->f_mntfromname
));
1456 strlcpy(statp
->f_mntonname
, vfsp
->mnt_stat
.f_mntonname
,
1457 sizeof (statp
->f_mntonname
));
1459 statp
->f_namemax
= MAXNAMELEN
- 1;
1466 zfs_root(vfs_t
*vfsp
, int flags
, vnode_t
**vpp
)
1468 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1474 error
= zfs_zget(zfsvfs
, zfsvfs
->z_root
, &rootzp
);
1476 *vpp
= ZTOV(rootzp
);
1481 error
= vn_lock(*vpp
, flags
);
1491 * Teardown the zfsvfs::z_os.
1493 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
1494 * and 'z_teardown_inactive_lock' held.
1497 zfsvfs_teardown(zfsvfs_t
*zfsvfs
, boolean_t unmounting
)
1503 * If someone has not already unmounted this file system,
1504 * drain the zrele_taskq to ensure all active references to the
1505 * zfsvfs_t have been handled only then can it be safely destroyed.
1509 * If we're unmounting we have to wait for the list to
1512 * If we're not unmounting there's no guarantee the list
1513 * will drain completely, but zreles run from the taskq
1514 * may add the parents of dir-based xattrs to the taskq
1515 * so we want to wait for these.
1517 * We can safely read z_nr_znodes without locking because the
1518 * VFS has already blocked operations which add to the
1519 * z_all_znodes list and thus increment z_nr_znodes.
1522 while (zfsvfs
->z_nr_znodes
> 0) {
1523 taskq_wait_outstanding(dsl_pool_zrele_taskq(
1524 dmu_objset_pool(zfsvfs
->z_os
)), 0);
1525 if (++round
> 1 && !unmounting
)
1529 ZFS_TEARDOWN_ENTER_WRITE(zfsvfs
, FTAG
);
1533 * We purge the parent filesystem's vfsp as the parent
1534 * filesystem and all of its snapshots have their vnode's
1535 * v_vfsp set to the parent's filesystem's vfsp. Note,
1536 * 'z_parent' is self referential for non-snapshots.
1538 #ifdef FREEBSD_NAMECACHE
1539 #if __FreeBSD_version >= 1300117
1540 cache_purgevfs(zfsvfs
->z_parent
->z_vfs
);
1542 cache_purgevfs(zfsvfs
->z_parent
->z_vfs
, true);
1548 * Close the zil. NB: Can't close the zil while zfs_inactive
1549 * threads are blocked as zil_close can call zfs_inactive.
1551 if (zfsvfs
->z_log
) {
1552 zil_close(zfsvfs
->z_log
);
1553 zfsvfs
->z_log
= NULL
;
1556 ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs
);
1559 * If we are not unmounting (ie: online recv) and someone already
1560 * unmounted this file system while we were doing the switcheroo,
1561 * or a reopen of z_os failed then just bail out now.
1563 if (!unmounting
&& (zfsvfs
->z_unmounted
|| zfsvfs
->z_os
== NULL
)) {
1564 ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs
);
1565 ZFS_TEARDOWN_EXIT(zfsvfs
, FTAG
);
1566 return (SET_ERROR(EIO
));
1570 * At this point there are no vops active, and any new vops will
1571 * fail with EIO since we have z_teardown_lock for writer (only
1572 * relevant for forced unmount).
1574 * Release all holds on dbufs.
1576 mutex_enter(&zfsvfs
->z_znodes_lock
);
1577 for (zp
= list_head(&zfsvfs
->z_all_znodes
); zp
!= NULL
;
1578 zp
= list_next(&zfsvfs
->z_all_znodes
, zp
)) {
1579 if (zp
->z_sa_hdl
!= NULL
) {
1580 zfs_znode_dmu_fini(zp
);
1583 mutex_exit(&zfsvfs
->z_znodes_lock
);
1586 * If we are unmounting, set the unmounted flag and let new vops
1587 * unblock. zfs_inactive will have the unmounted behavior, and all
1588 * other vops will fail with EIO.
1591 zfsvfs
->z_unmounted
= B_TRUE
;
1592 ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs
);
1593 ZFS_TEARDOWN_EXIT(zfsvfs
, FTAG
);
1597 * z_os will be NULL if there was an error in attempting to reopen
1598 * zfsvfs, so just return as the properties had already been
1599 * unregistered and cached data had been evicted before.
1601 if (zfsvfs
->z_os
== NULL
)
1605 * Unregister properties.
1607 zfs_unregister_callbacks(zfsvfs
);
1612 if (!zfs_is_readonly(zfsvfs
))
1613 txg_wait_synced(dmu_objset_pool(zfsvfs
->z_os
), 0);
1614 dmu_objset_evict_dbufs(zfsvfs
->z_os
);
1615 dd
= zfsvfs
->z_os
->os_dsl_dataset
->ds_dir
;
1616 dsl_dir_cancel_waiters(dd
);
1622 zfs_umount(vfs_t
*vfsp
, int fflag
)
1624 kthread_t
*td
= curthread
;
1625 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1627 cred_t
*cr
= td
->td_ucred
;
1630 ret
= secpolicy_fs_unmount(cr
, vfsp
);
1632 if (dsl_deleg_access((char *)vfsp
->vfs_resource
,
1633 ZFS_DELEG_PERM_MOUNT
, cr
))
1638 * Unmount any snapshots mounted under .zfs before unmounting the
1641 if (zfsvfs
->z_ctldir
!= NULL
) {
1642 if ((ret
= zfsctl_umount_snapshots(vfsp
, fflag
, cr
)) != 0)
1646 if (fflag
& MS_FORCE
) {
1648 * Mark file system as unmounted before calling
1649 * vflush(FORCECLOSE). This way we ensure no future vnops
1650 * will be called and risk operating on DOOMED vnodes.
1652 ZFS_TEARDOWN_ENTER_WRITE(zfsvfs
, FTAG
);
1653 zfsvfs
->z_unmounted
= B_TRUE
;
1654 ZFS_TEARDOWN_EXIT(zfsvfs
, FTAG
);
1658 * Flush all the files.
1660 ret
= vflush(vfsp
, 0, (fflag
& MS_FORCE
) ? FORCECLOSE
: 0, td
);
1663 while (taskqueue_cancel(zfsvfs_taskq
->tq_queue
,
1664 &zfsvfs
->z_unlinked_drain_task
, NULL
) != 0)
1665 taskqueue_drain(zfsvfs_taskq
->tq_queue
,
1666 &zfsvfs
->z_unlinked_drain_task
);
1668 VERIFY0(zfsvfs_teardown(zfsvfs
, B_TRUE
));
1672 * z_os will be NULL if there was an error in
1673 * attempting to reopen zfsvfs.
1677 * Unset the objset user_ptr.
1679 mutex_enter(&os
->os_user_ptr_lock
);
1680 dmu_objset_set_user(os
, NULL
);
1681 mutex_exit(&os
->os_user_ptr_lock
);
1684 * Finally release the objset
1686 dmu_objset_disown(os
, B_TRUE
, zfsvfs
);
1690 * We can now safely destroy the '.zfs' directory node.
1692 if (zfsvfs
->z_ctldir
!= NULL
)
1693 zfsctl_destroy(zfsvfs
);
1700 zfs_vget(vfs_t
*vfsp
, ino_t ino
, int flags
, vnode_t
**vpp
)
1702 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1707 * zfs_zget() can't operate on virtual entries like .zfs/ or
1708 * .zfs/snapshot/ directories, that's why we return EOPNOTSUPP.
1709 * This will make NFS to switch to LOOKUP instead of using VGET.
1711 if (ino
== ZFSCTL_INO_ROOT
|| ino
== ZFSCTL_INO_SNAPDIR
||
1712 (zfsvfs
->z_shares_dir
!= 0 && ino
== zfsvfs
->z_shares_dir
))
1713 return (EOPNOTSUPP
);
1716 err
= zfs_zget(zfsvfs
, ino
, &zp
);
1717 if (err
== 0 && zp
->z_unlinked
) {
1725 err
= vn_lock(*vpp
, flags
);
1735 #if __FreeBSD_version >= 1300098
1736 zfs_checkexp(vfs_t
*vfsp
, struct sockaddr
*nam
, uint64_t *extflagsp
,
1737 struct ucred
**credanonp
, int *numsecflavors
, int *secflavors
)
1739 zfs_checkexp(vfs_t
*vfsp
, struct sockaddr
*nam
, int *extflagsp
,
1740 struct ucred
**credanonp
, int *numsecflavors
, int **secflavors
)
1743 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1746 * If this is regular file system vfsp is the same as
1747 * zfsvfs->z_parent->z_vfs, but if it is snapshot,
1748 * zfsvfs->z_parent->z_vfs represents parent file system
1749 * which we have to use here, because only this file system
1750 * has mnt_export configured.
1752 return (vfs_stdcheckexp(zfsvfs
->z_parent
->z_vfs
, nam
, extflagsp
,
1753 credanonp
, numsecflavors
, secflavors
));
1756 _Static_assert(sizeof (struct fid
) >= SHORT_FID_LEN
,
1757 "struct fid bigger than SHORT_FID_LEN");
1758 _Static_assert(sizeof (struct fid
) >= LONG_FID_LEN
,
1759 "struct fid bigger than LONG_FID_LEN");
1762 zfs_fhtovp(vfs_t
*vfsp
, fid_t
*fidp
, int flags
, vnode_t
**vpp
)
1764 struct componentname cn
;
1765 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1768 uint64_t object
= 0;
1769 uint64_t fid_gen
= 0;
1770 uint64_t setgen
= 0;
1780 * On FreeBSD we can get snapshot's mount point or its parent file
1781 * system mount point depending if snapshot is already mounted or not.
1783 if (zfsvfs
->z_parent
== zfsvfs
&& fidp
->fid_len
== LONG_FID_LEN
) {
1784 zfid_long_t
*zlfid
= (zfid_long_t
*)fidp
;
1785 uint64_t objsetid
= 0;
1787 for (i
= 0; i
< sizeof (zlfid
->zf_setid
); i
++)
1788 objsetid
|= ((uint64_t)zlfid
->zf_setid
[i
]) << (8 * i
);
1790 for (i
= 0; i
< sizeof (zlfid
->zf_setgen
); i
++)
1791 setgen
|= ((uint64_t)zlfid
->zf_setgen
[i
]) << (8 * i
);
1795 err
= zfsctl_lookup_objset(vfsp
, objsetid
, &zfsvfs
);
1797 return (SET_ERROR(EINVAL
));
1801 if (fidp
->fid_len
== SHORT_FID_LEN
|| fidp
->fid_len
== LONG_FID_LEN
) {
1802 zfid_short_t
*zfid
= (zfid_short_t
*)fidp
;
1804 for (i
= 0; i
< sizeof (zfid
->zf_object
); i
++)
1805 object
|= ((uint64_t)zfid
->zf_object
[i
]) << (8 * i
);
1807 for (i
= 0; i
< sizeof (zfid
->zf_gen
); i
++)
1808 fid_gen
|= ((uint64_t)zfid
->zf_gen
[i
]) << (8 * i
);
1811 return (SET_ERROR(EINVAL
));
1814 if (fidp
->fid_len
== LONG_FID_LEN
&& (fid_gen
> 1 || setgen
!= 0)) {
1815 dprintf("snapdir fid: fid_gen (%llu) and setgen (%llu)\n",
1816 (u_longlong_t
)fid_gen
, (u_longlong_t
)setgen
);
1817 return (SET_ERROR(EINVAL
));
1821 * A zero fid_gen means we are in .zfs or the .zfs/snapshot
1822 * directory tree. If the object == zfsvfs->z_shares_dir, then
1823 * we are in the .zfs/shares directory tree.
1825 if ((fid_gen
== 0 &&
1826 (object
== ZFSCTL_INO_ROOT
|| object
== ZFSCTL_INO_SNAPDIR
)) ||
1827 (zfsvfs
->z_shares_dir
!= 0 && object
== zfsvfs
->z_shares_dir
)) {
1829 VERIFY0(zfsctl_root(zfsvfs
, LK_SHARED
, &dvp
));
1830 if (object
== ZFSCTL_INO_SNAPDIR
) {
1831 cn
.cn_nameptr
= "snapshot";
1832 cn
.cn_namelen
= strlen(cn
.cn_nameptr
);
1833 cn
.cn_nameiop
= LOOKUP
;
1834 cn
.cn_flags
= ISLASTCN
| LOCKLEAF
;
1835 cn
.cn_lkflags
= flags
;
1836 VERIFY0(VOP_LOOKUP(dvp
, vpp
, &cn
));
1838 } else if (object
== zfsvfs
->z_shares_dir
) {
1840 * XXX This branch must not be taken,
1841 * if it is, then the lookup below will
1844 cn
.cn_nameptr
= "shares";
1845 cn
.cn_namelen
= strlen(cn
.cn_nameptr
);
1846 cn
.cn_nameiop
= LOOKUP
;
1847 cn
.cn_flags
= ISLASTCN
;
1848 cn
.cn_lkflags
= flags
;
1849 VERIFY0(VOP_LOOKUP(dvp
, vpp
, &cn
));
1857 gen_mask
= -1ULL >> (64 - 8 * i
);
1859 dprintf("getting %llu [%llu mask %llx]\n", (u_longlong_t
)object
,
1860 (u_longlong_t
)fid_gen
,
1861 (u_longlong_t
)gen_mask
);
1862 if ((err
= zfs_zget(zfsvfs
, object
, &zp
))) {
1866 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(zfsvfs
), &zp_gen
,
1868 zp_gen
= zp_gen
& gen_mask
;
1871 if (zp
->z_unlinked
|| zp_gen
!= fid_gen
) {
1872 dprintf("znode gen (%llu) != fid gen (%llu)\n",
1873 (u_longlong_t
)zp_gen
, (u_longlong_t
)fid_gen
);
1876 return (SET_ERROR(EINVAL
));
1881 err
= vn_lock(*vpp
, flags
);
1883 vnode_create_vobject(*vpp
, zp
->z_size
, curthread
);
1890 * Block out VOPs and close zfsvfs_t::z_os
1892 * Note, if successful, then we return with the 'z_teardown_lock' and
1893 * 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
1894 * dataset and objset intact so that they can be atomically handed off during
1895 * a subsequent rollback or recv operation and the resume thereafter.
1898 zfs_suspend_fs(zfsvfs_t
*zfsvfs
)
1902 if ((error
= zfsvfs_teardown(zfsvfs
, B_FALSE
)) != 0)
1909 * Rebuild SA and release VOPs. Note that ownership of the underlying dataset
1910 * is an invariant across any of the operations that can be performed while the
1911 * filesystem was suspended. Whether it succeeded or failed, the preconditions
1912 * are the same: the relevant objset and associated dataset are owned by
1913 * zfsvfs, held, and long held on entry.
1916 zfs_resume_fs(zfsvfs_t
*zfsvfs
, dsl_dataset_t
*ds
)
1921 ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs
));
1922 ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs
));
1925 * We already own this, so just update the objset_t, as the one we
1926 * had before may have been evicted.
1929 VERIFY3P(ds
->ds_owner
, ==, zfsvfs
);
1930 VERIFY(dsl_dataset_long_held(ds
));
1931 dsl_pool_t
*dp
= spa_get_dsl(dsl_dataset_get_spa(ds
));
1932 dsl_pool_config_enter(dp
, FTAG
);
1933 VERIFY0(dmu_objset_from_ds(ds
, &os
));
1934 dsl_pool_config_exit(dp
, FTAG
);
1936 err
= zfsvfs_init(zfsvfs
, os
);
1940 ds
->ds_dir
->dd_activity_cancelled
= B_FALSE
;
1941 VERIFY0(zfsvfs_setup(zfsvfs
, B_FALSE
));
1943 zfs_set_fuid_feature(zfsvfs
);
1946 * Attempt to re-establish all the active znodes with
1947 * their dbufs. If a zfs_rezget() fails, then we'll let
1948 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
1949 * when they try to use their znode.
1951 mutex_enter(&zfsvfs
->z_znodes_lock
);
1952 for (zp
= list_head(&zfsvfs
->z_all_znodes
); zp
;
1953 zp
= list_next(&zfsvfs
->z_all_znodes
, zp
)) {
1954 (void) zfs_rezget(zp
);
1956 mutex_exit(&zfsvfs
->z_znodes_lock
);
1959 /* release the VOPs */
1960 ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs
);
1961 ZFS_TEARDOWN_EXIT(zfsvfs
, FTAG
);
1965 * Since we couldn't setup the sa framework, try to force
1966 * unmount this file system.
1968 if (vn_vfswlock(zfsvfs
->z_vfs
->vfs_vnodecovered
) == 0) {
1969 vfs_ref(zfsvfs
->z_vfs
);
1970 (void) dounmount(zfsvfs
->z_vfs
, MS_FORCE
, curthread
);
1977 zfs_freevfs(vfs_t
*vfsp
)
1979 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1981 zfsvfs_free(zfsvfs
);
1983 atomic_dec_32(&zfs_active_fs_count
);
1987 static int desiredvnodes_backup
;
1988 #include <sys/vmmeter.h>
1991 #include <vm/vm_page.h>
1992 #include <vm/vm_object.h>
1993 #include <vm/vm_kern.h>
1994 #include <vm/vm_map.h>
/*
 * The default number of vnodes is tuned with UFS inodes in mind and is
 * too large for ZFS on i386, so shrink it there.
 *
 * NOTE(review): the platform guard (#ifdef __i386__) was not visible in
 * the fragmented source — confirm against upstream.
 */
static void
zfs_vnodes_adjust(void)
{
#ifdef __i386__
	int newdesiredvnodes;

	desiredvnodes_backup = desiredvnodes;

	/*
	 * We calculate newdesiredvnodes the same way it is done in
	 * vntblinit(). If it is equal to desiredvnodes, it means that
	 * it wasn't tuned by the administrator and we can tune it down.
	 */
	newdesiredvnodes = min(maxproc + vm_cnt.v_page_count / 4, 2 *
	    vm_kmem_size / (5 * (sizeof (struct vm_object) +
	    sizeof (struct vnode))));
	if (newdesiredvnodes == desiredvnodes)
		desiredvnodes = (3 * newdesiredvnodes) / 4;
#endif
}
/* Restore the vnode limit saved by zfs_vnodes_adjust() (i386 only). */
static void
zfs_vnodes_adjust_back(void)
{
#ifdef __i386__
	desiredvnodes = desiredvnodes_backup;
#endif
}
2031 printf("ZFS filesystem version: " ZPL_VERSION_STRING
"\n");
2034 * Initialize .zfs directory structures
2039 * Initialize znode cache, vnode ops, etc...
2044 * Reduce number of vnodes. Originally number of vnodes is calculated
2045 * with UFS inode in mind. We reduce it here, because it's too big for
2048 zfs_vnodes_adjust();
2050 dmu_objset_register_type(DMU_OST_ZFS
, zpl_get_file_info
);
2052 zfsvfs_taskq
= taskq_create("zfsvfs", 1, minclsyspri
, 0, 0, 0);
2058 taskq_destroy(zfsvfs_taskq
);
2061 zfs_vnodes_adjust_back();
2067 return (zfs_active_fs_count
!= 0);
2071 * Release VOPs and unmount a suspended filesystem.
2074 zfs_end_fs(zfsvfs_t
*zfsvfs
, dsl_dataset_t
*ds
)
2076 ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs
));
2077 ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs
));
2080 * We already own this, so just hold and rele it to update the
2081 * objset_t, as the one we had before may have been evicted.
2084 VERIFY3P(ds
->ds_owner
, ==, zfsvfs
);
2085 VERIFY(dsl_dataset_long_held(ds
));
2086 dsl_pool_t
*dp
= spa_get_dsl(dsl_dataset_get_spa(ds
));
2087 dsl_pool_config_enter(dp
, FTAG
);
2088 VERIFY0(dmu_objset_from_ds(ds
, &os
));
2089 dsl_pool_config_exit(dp
, FTAG
);
2092 /* release the VOPs */
2093 ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs
);
2094 ZFS_TEARDOWN_EXIT(zfsvfs
, FTAG
);
2097 * Try to force unmount this file system.
2099 (void) zfs_umount(zfsvfs
->z_vfs
, 0);
2100 zfsvfs
->z_unmounted
= B_TRUE
;
2105 zfs_set_version(zfsvfs_t
*zfsvfs
, uint64_t newvers
)
2108 objset_t
*os
= zfsvfs
->z_os
;
2111 if (newvers
< ZPL_VERSION_INITIAL
|| newvers
> ZPL_VERSION
)
2112 return (SET_ERROR(EINVAL
));
2114 if (newvers
< zfsvfs
->z_version
)
2115 return (SET_ERROR(EINVAL
));
2117 if (zfs_spa_version_map(newvers
) >
2118 spa_version(dmu_objset_spa(zfsvfs
->z_os
)))
2119 return (SET_ERROR(ENOTSUP
));
2121 tx
= dmu_tx_create(os
);
2122 dmu_tx_hold_zap(tx
, MASTER_NODE_OBJ
, B_FALSE
, ZPL_VERSION_STR
);
2123 if (newvers
>= ZPL_VERSION_SA
&& !zfsvfs
->z_use_sa
) {
2124 dmu_tx_hold_zap(tx
, MASTER_NODE_OBJ
, B_TRUE
,
2126 dmu_tx_hold_zap(tx
, DMU_NEW_OBJECT
, FALSE
, NULL
);
2128 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2134 error
= zap_update(os
, MASTER_NODE_OBJ
, ZPL_VERSION_STR
,
2135 8, 1, &newvers
, tx
);
2142 if (newvers
>= ZPL_VERSION_SA
&& !zfsvfs
->z_use_sa
) {
2145 ASSERT3U(spa_version(dmu_objset_spa(zfsvfs
->z_os
)), >=,
2147 sa_obj
= zap_create(os
, DMU_OT_SA_MASTER_NODE
,
2148 DMU_OT_NONE
, 0, tx
);
2150 error
= zap_add(os
, MASTER_NODE_OBJ
,
2151 ZFS_SA_ATTRS
, 8, 1, &sa_obj
, tx
);
2154 VERIFY0(sa_set_sa_object(os
, sa_obj
));
2155 sa_register_update_callback(os
, zfs_sa_upgrade
);
2158 spa_history_log_internal_ds(dmu_objset_ds(os
), "upgrade", tx
,
2159 "from %ju to %ju", (uintmax_t)zfsvfs
->z_version
,
2160 (uintmax_t)newvers
);
2163 zfsvfs
->z_version
= newvers
;
2164 os
->os_version
= newvers
;
2166 zfs_set_fuid_feature(zfsvfs
);
2172 * Read a property stored within the master node.
2175 zfs_get_zplprop(objset_t
*os
, zfs_prop_t prop
, uint64_t *value
)
2177 uint64_t *cached_copy
= NULL
;
2180 * Figure out where in the objset_t the cached copy would live, if it
2181 * is available for the requested property.
2185 case ZFS_PROP_VERSION
:
2186 cached_copy
= &os
->os_version
;
2188 case ZFS_PROP_NORMALIZE
:
2189 cached_copy
= &os
->os_normalization
;
2191 case ZFS_PROP_UTF8ONLY
:
2192 cached_copy
= &os
->os_utf8only
;
2195 cached_copy
= &os
->os_casesensitivity
;
2201 if (cached_copy
!= NULL
&& *cached_copy
!= OBJSET_PROP_UNINITIALIZED
) {
2202 *value
= *cached_copy
;
2207 * If the property wasn't cached, look up the file system's value for
2208 * the property. For the version property, we look up a slightly
2213 if (prop
== ZFS_PROP_VERSION
) {
2214 pname
= ZPL_VERSION_STR
;
2216 pname
= zfs_prop_to_name(prop
);
2220 ASSERT3U(os
->os_phys
->os_type
, ==, DMU_OST_ZFS
);
2221 error
= zap_lookup(os
, MASTER_NODE_OBJ
, pname
, 8, 1, value
);
2224 if (error
== ENOENT
) {
2225 /* No value set, use the default value */
2227 case ZFS_PROP_VERSION
:
2228 *value
= ZPL_VERSION
;
2230 case ZFS_PROP_NORMALIZE
:
2231 case ZFS_PROP_UTF8ONLY
:
2235 *value
= ZFS_CASE_SENSITIVE
;
2237 case ZFS_PROP_ACLTYPE
:
2238 *value
= ZFS_ACLTYPE_NFSV4
;
2247 * If one of the methods for getting the property value above worked,
2248 * copy it into the objset_t's cache.
2250 if (error
== 0 && cached_copy
!= NULL
) {
2251 *cached_copy
= *value
;
2258 * Return true if the corresponding vfs's unmounted flag is set.
2259 * Otherwise return false.
2260 * If this function returns true we know VFS unmount has been initiated.
2263 zfs_get_vfs_flag_unmounted(objset_t
*os
)
2266 boolean_t unmounted
= B_FALSE
;
2268 ASSERT3U(dmu_objset_type(os
), ==, DMU_OST_ZFS
);
2270 mutex_enter(&os
->os_user_ptr_lock
);
2271 zfvp
= dmu_objset_get_user(os
);
2272 if (zfvp
!= NULL
&& zfvp
->z_vfs
!= NULL
&&
2273 (zfvp
->z_vfs
->mnt_kern_flag
& MNTK_UNMOUNT
))
2275 mutex_exit(&os
->os_user_ptr_lock
);
2282 zfsvfs_update_fromname(const char *oldname
, const char *newname
)
2284 char tmpbuf
[MAXPATHLEN
];
2289 oldlen
= strlen(oldname
);
2291 mtx_lock(&mountlist_mtx
);
2292 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
2293 fromname
= mp
->mnt_stat
.f_mntfromname
;
2294 if (strcmp(fromname
, oldname
) == 0) {
2295 (void) strlcpy(fromname
, newname
,
2296 sizeof (mp
->mnt_stat
.f_mntfromname
));
2299 if (strncmp(fromname
, oldname
, oldlen
) == 0 &&
2300 (fromname
[oldlen
] == '/' || fromname
[oldlen
] == '@')) {
2301 (void) snprintf(tmpbuf
, sizeof (tmpbuf
), "%s%s",
2302 newname
, fromname
+ oldlen
);
2303 (void) strlcpy(fromname
, tmpbuf
,
2304 sizeof (mp
->mnt_stat
.f_mntfromname
));
2308 mtx_unlock(&mountlist_mtx
);