/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/sa_impl.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/mkdev.h>
#include <sys/modctl.h>
#include <sys/refstr.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/bootconf.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include "zfs_comutil.h"

int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	 */
	if (!wait)
		return (0);

	if (zsb != NULL) {
		/*
		 * Sync a specific filesystem.
		 */
		dsl_pool_t *dp;

		dp = dmu_objset_pool(zsb->z_os);

		/*
		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		 */
		if (spa_suspended(dp->dp_spa)) {
			return (0);
		}

		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, 0);
	} else {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		spa_sync_allpools();
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sync);

boolean_t
zfs_is_readonly(zfs_sb_t *zsb)
{
	return (!!(zsb->z_sb->s_flags & MS_RDONLY));
}
EXPORT_SYMBOL(zfs_is_readonly);

static void
atime_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_atime = newval;
}

static void
relatime_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_relatime = newval;
}

static void
xattr_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval == ZFS_XATTR_OFF) {
		zsb->z_flags &= ~ZSB_XATTR;
	} else {
		zsb->z_flags |= ZSB_XATTR;

		if (newval == ZFS_XATTR_SA)
			zsb->z_xattr_sa = B_TRUE;
		else
			zsb->z_xattr_sa = B_FALSE;
	}
}

static void
acltype_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	switch (newval) {
	case ZFS_ACLTYPE_OFF:
		zsb->z_acl_type = ZFS_ACLTYPE_OFF;
		zsb->z_sb->s_flags &= ~MS_POSIXACL;
		break;
	case ZFS_ACLTYPE_POSIXACL:
#ifdef CONFIG_FS_POSIX_ACL
		zsb->z_acl_type = ZFS_ACLTYPE_POSIXACL;
		zsb->z_sb->s_flags |= MS_POSIXACL;
#else
		zsb->z_acl_type = ZFS_ACLTYPE_OFF;
		zsb->z_sb->s_flags &= ~MS_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
		break;
	default:
		break;
	}
}

static void
blksz_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zsb->z_os)));
	ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
	ASSERT(ISP2(newval));

	zsb->z_max_blksz = newval;
}

static void
readonly_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_RDONLY;
	else
		sb->s_flags &= ~MS_RDONLY;
}

static void
devices_changed_cb(void *arg, uint64_t newval)
{
}

static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}

static void
exec_changed_cb(void *arg, uint64_t newval)
{
}

static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_MANDLOCK;
	else
		sb->s_flags &= ~MS_MANDLOCK;
}

static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_show_ctldir = newval;
}

static void
vscan_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_vscan = newval;
}

static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_acl_inherit = newval;
}

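/*
 * The one-line callbacks above are invoked by the DSL property framework
 * whenever the corresponding dataset property changes.  zfs_register_callbacks()
 * below wires each property name to its handler and then fires selected
 * handlers once to apply any temporary mount-option overrides.
 */
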
int
zfs_register_callbacks(zfs_sb_t *zsb)
{
	struct dsl_dataset *ds = NULL;
	objset_t *os = zsb->z_os;
	zfs_mntopts_t *zmo = zsb->z_mntopts;
	int error = 0;

	/*
	 * The act of registering our callbacks will destroy any mount
	 * options we may have.  In order to enable temporary overrides
	 * of mount options, we stash away the current values and
	 * restore them after we register the callbacks.
	 */
	if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os))) {
		zmo->z_do_readonly = B_TRUE;
		zmo->z_readonly = B_TRUE;
	}

	/*
	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go
	 * overboard...
	 */
	ds = dmu_objset_ds(os);
	dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
	error = dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zsb);
	dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
	if (error)
		goto unregister;

	/*
	 * Invoke our callbacks to restore temporary mount options.
	 */
	if (zmo->z_do_readonly)
		readonly_changed_cb(zsb, zmo->z_readonly);
	if (zmo->z_do_setuid)
		setuid_changed_cb(zsb, zmo->z_setuid);
	if (zmo->z_do_exec)
		exec_changed_cb(zsb, zmo->z_exec);
	if (zmo->z_do_devices)
		devices_changed_cb(zsb, zmo->z_devices);
	if (zmo->z_do_xattr)
		xattr_changed_cb(zsb, zmo->z_xattr);
	if (zmo->z_do_atime)
		atime_changed_cb(zsb, zmo->z_atime);
	if (zmo->z_do_relatime)
		relatime_changed_cb(zsb, zmo->z_relatime);
	if (zmo->z_do_nbmand)
		nbmand_changed_cb(zsb, zmo->z_nbmand);

	return (0);

unregister:
	dsl_prop_unregister_all(ds, zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_register_callbacks);

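/*
 * Note: dsl_prop_register() also invokes the callback once with the property's
 * current value at registration time, which is how fields such as z_atime and
 * z_acl_type receive their initial settings at mount.
 */
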
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (SET_ERROR(ENOENT));

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids
	 */
	if (data == NULL)
		return (SET_ERROR(EEXIST));

	if (bonustype == DMU_OT_ZNODE) {
		znode_phys_t *znp = data;
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
	} else {
		int hdrsize;
		sa_hdr_phys_t *sap = data;
		sa_hdr_phys_t sa = *sap;
		boolean_t swap = B_FALSE;

		ASSERT(bonustype == DMU_OT_SA);

		if (sa.sa_magic == 0) {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
			 * in yet.
			 */
			*userp = 0;
			*groupp = 0;
			return (0);
		}

		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
			sa.sa_magic = SA_MAGIC;
			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
			swap = B_TRUE;
		} else {
			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
		}

		hdrsize = sa_hdrsize(&sa);
		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_UID_OFFSET));
		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_GID_OFFSET));

		if (swap) {
			*userp = BSWAP_64(*userp);
			*groupp = BSWAP_64(*groupp);
		}
	}
	return (0);
}

static void
fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
	uint64_t fuid;
	const char *domain;

	fuid = strtonum(fuidstr, NULL);

	domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid));
	if (domain)
		(void) strlcpy(domainbuf, domain, buflen);
	else
		domainbuf[0] = '\0';
	*ridp = FUID_RID(fuid);
}

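/*
 * Note on FUIDs: a FUID packs an index into the filesystem's domain table in
 * its upper 32 bits and a RID in its lower 32 bits (FUID_INDEX, FUID_RID and
 * FUID_ENCODE in sys/zfs_fuid.h); plain POSIX ids use domain index 0.
 */
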
static uint64_t
zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
	switch (type) {
	case ZFS_PROP_USERUSED:
		return (DMU_USERUSED_OBJECT);
	case ZFS_PROP_GROUPUSED:
		return (DMU_GROUPUSED_OBJECT);
	case ZFS_PROP_USERQUOTA:
		return (zsb->z_userquota_obj);
	case ZFS_PROP_GROUPQUOTA:
		return (zsb->z_groupquota_obj);
	default:
		return (SET_ERROR(ENOTSUP));
	}
}

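/*
 * The *USED objects above are accounting ZAPs maintained by the DMU's objset
 * user accounting, while the per-filesystem quota ZAPs are created on demand
 * by zfs_set_userquota() below.
 */
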
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0) {
		*bufsizep = 0;
		return (0);
	}

	for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		fuidstr_to_sid(zsb, za.za_name,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
EXPORT_SYMBOL(zfs_userspace_many);

/*
 * buf must be big enough (eg, 32 bytes)
 */
static int
id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
		if (domainid == -1)
			return (SET_ERROR(ENOENT));
	}
	fuid = FUID_ENCODE(domainid, rid);
	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	return (0);
}

int
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[32];
	int err;
	uint64_t obj;

	*valp = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0)
		return (0);

	err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
EXPORT_SYMBOL(zfs_userspace_one);

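/*
 * Both the quota and the used ZAPs are keyed by the FUID rendered as a hex
 * string ("%llx"); for ordinary POSIX ids the domain is empty, so the key is
 * simply the uid/gid in hex, matching the keys zfs_fuid_overquota() builds.
 */
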
int
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (SET_ERROR(EINVAL));

	if (zsb->z_version < ZPL_VERSION_USERSPACE)
		return (SET_ERROR(ENOTSUP));

	objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
	    &zsb->z_groupquota_obj;

	err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zsb->z_fuid_dirty;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zsb->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zsb->z_lock);

	if (quota == 0) {
		err = zap_remove(zsb->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);
	dmu_tx_commit(tx);
	return (err);
}
EXPORT_SYMBOL(zfs_set_userquota);

boolean_t
zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid)
{
	char buf[32];
	uint64_t used, quota, usedobj, quotaobj;
	int err;

	usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
EXPORT_SYMBOL(zfs_fuid_overquota);

boolean_t
zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup)
{
	uint64_t fuid;
	uint64_t quotaobj;

	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	fuid = isgroup ? zp->z_gid : zp->z_uid;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	return (zfs_fuid_overquota(zsb, isgroup, fuid));
}
EXPORT_SYMBOL(zfs_owner_overquota);

zfs_mntopts_t *
zfs_mntopts_alloc(void)
{
	return (kmem_zalloc(sizeof (zfs_mntopts_t), KM_SLEEP));
}

void
zfs_mntopts_free(zfs_mntopts_t *zmo)
{
	if (zmo->z_osname)
		strfree(zmo->z_osname);

	if (zmo->z_mntpoint)
		strfree(zmo->z_mntpoint);

	kmem_free(zmo, sizeof (zfs_mntopts_t));
}

int
zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	uint64_t sa_obj;
	int i, size, error;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Optional temporary mount options, free'd in zfs_sb_free().
	 */
	zsb->z_mntopts = (zmo ? zmo : zfs_mntopts_alloc());

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields.
	 */
	zsb->z_os = os;
	zsb->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version > ZPL_VERSION) {
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
	zsb->z_hold_size = size;
	zsb->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, KM_SLEEP);
	zsb->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (i = 0; i != size; i++) {
		avl_create(&zsb->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zsb->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
	}

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;

	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
EXPORT_SYMBOL(zfs_sb_create);

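/*
 * As the series of zap_lookup() calls above shows, the filesystem's master
 * node ZAP records the root directory object, the unlinked set, the user and
 * group quota objects, the FUID tables and the shares directory; mount simply
 * reads these well-known keys to locate everything else.
 */
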
int
zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
	int error;

	error = zfs_register_callbacks(zsb);
	if (error)
		return (error);

	/*
	 * Set the objset user_ptr to track its zsb.
	 */
	mutex_enter(&zsb->z_os->os_user_ptr_lock);
	dmu_objset_set_user(zsb->z_os, zsb);
	mutex_exit(&zsb->z_os->os_user_ptr_lock);

	zsb->z_log = zil_open(zsb->z_os, zfs_get_data);

	/*
	 * If we are not mounting (ie: online recv), then we don't
	 * have to worry about replaying the log as we blocked all
	 * operations out since we closed the ZIL.
	 */
	if (mounting) {
		boolean_t readonly;

		/*
		 * During replay we remove the read only flag to
		 * allow replays to succeed.
		 */
		readonly = zfs_is_readonly(zsb);
		if (readonly != 0)
			readonly_changed_cb(zsb, B_FALSE);
		else
			zfs_unlinked_drain(zsb);

		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest
		 * doesn't use readonly mounts, where
		 * zfs_unlinked_drain() isn't called.)  This is because
		 * ziltest causes spa_sync() to think it's committed,
		 * but actually it is not, so the intent log contains
		 * many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated
		 * in a yet later txg.  This would write a "create
		 * object N" record to the intent log.  Normally, this
		 * would be fine because the spa_sync() would have
		 * written out the fact that object N is free, before
		 * we could write the "create object N" intent log
		 * record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
			if (zil_replay_disable) {
				zil_destroy(zsb->z_log, B_FALSE);
			} else {
				zsb->z_replay = B_TRUE;
				zil_replay(zsb->z_os, zsb,
				    zfs_replay_vector);
				zsb->z_replay = B_FALSE;
			}
		}

		/* restore readonly bit */
		if (readonly != 0)
			readonly_changed_cb(zsb, B_TRUE);
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sb_setup);

void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i, size = zsb->z_hold_size;

	zfs_fuid_destroy(zsb);

	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrm_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != size; i++) {
		avl_destroy(&zsb->z_hold_trees[i]);
		mutex_destroy(&zsb->z_hold_locks[i]);
	}
	vmem_free(zsb->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zsb->z_hold_locks, sizeof (kmutex_t) * size);
	zfs_mntopts_free(zsb->z_mntopts);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
EXPORT_SYMBOL(zfs_sb_free);

static void
zfs_set_fuid_feature(zfs_sb_t *zsb)
{
	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
}

void
zfs_unregister_callbacks(zfs_sb_t *zsb)
{
	objset_t *os = zsb->z_os;

	if (!dmu_objset_is_snapshot(os))
		dsl_prop_unregister_all(dmu_objset_ds(os), zsb);
}
EXPORT_SYMBOL(zfs_unregister_callbacks);

#ifdef HAVE_MLSLABEL
/*
 * Check that the hex label string is appropriate for the dataset being
 * mounted into the global_zone proper.
 *
 * Return an error if the hex label string is not default or
 * admin_low/admin_high.  For admin_low labels, the corresponding
 * dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (SET_ERROR(EACCES));
		return (rdonly ? 0 : EACCES);
	}
	return (SET_ERROR(EACCES));
}
EXPORT_SYMBOL(zfs_check_global_label);
#endif /* HAVE_MLSLABEL */

int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint64_t fsid;
	uint32_t bshift;

	dmu_objset_space(zsb->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	fsid = dmu_objset_fsid_guid(zsb->z_os);
	/*
	 * The underlying storage pool actually uses multiple block
	 * sizes.  Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size.  Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably.  Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
	 */
	statp->f_frsize = zsb->z_max_blksz;
	statp->f_bsize = zsb->z_max_blksz;
	bshift = fls(statp->f_bsize) - 1;

	/*
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "fragment" size.
	 */
	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata.  ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects available
	 * and the number of blocks (each object will take at least a block).
	 */
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = (uint32_t)fsid;
	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = ZFS_MAXNAMELEN;

	/*
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	 */
	bzero(statp->f_spare, sizeof (statp->f_spare));

	return (0);
}
EXPORT_SYMBOL(zfs_statvfs);

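/*
 * Example of the arithmetic above: with the default z_max_blksz of 128K,
 * bshift is 17, so a filesystem with 1 GiB referenced and 3 GiB available
 * reports f_blocks = (1 GiB + 3 GiB) >> 17 = 32768 "blocks" of 128K each.
 */
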
int
zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
	znode_t *rootzp;
	int error;

	error = zfs_zget(zsb, zsb->z_root, &rootzp);
	if (error == 0)
		*ipp = ZTOI(rootzp);

	return (error);
}
EXPORT_SYMBOL(zfs_root);

#ifdef HAVE_D_PRUNE_ALIASES
/*
 * Linux kernels older than 3.1 do not support a per-filesystem shrinker.
 * To accommodate this we must improvise and manually walk the list of znodes
 * attempting to prune dentries in order to be able to drop the inodes.
 *
 * To avoid scanning the same znodes multiple times they are always rotated
 * to the end of the z_all_znodes list.  New znodes are inserted at the
 * end of the list so we're always scanning the oldest znodes first.
 */
static int
zfs_sb_prune_aliases(zfs_sb_t *zsb, unsigned long nr_to_scan)
{
	znode_t **zp_array, *zp;
	int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
	int objects = 0;
	int i = 0, j = 0;

	zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);

	mutex_enter(&zsb->z_znodes_lock);
	while ((zp = list_head(&zsb->z_all_znodes)) != NULL) {

		if ((i++ > nr_to_scan) || (j >= max_array))
			break;

		ASSERT(list_link_active(&zp->z_link_node));
		list_remove(&zsb->z_all_znodes, zp);
		list_insert_tail(&zsb->z_all_znodes, zp);

		/* Skip active znodes and .zfs entries */
		if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
			continue;

		if (igrab(ZTOI(zp)) == NULL)
			continue;

		zp_array[j] = zp;
		j++;
	}
	mutex_exit(&zsb->z_znodes_lock);

	for (i = 0; i < j; i++) {
		zp = zp_array[i];

		ASSERT3P(zp, !=, NULL);
		d_prune_aliases(ZTOI(zp));

		if (atomic_read(&ZTOI(zp)->i_count) == 1)
			objects++;

		iput(ZTOI(zp));
	}

	kmem_free(zp_array, max_array * sizeof (znode_t *));

	return (objects);
}
#endif /* HAVE_D_PRUNE_ALIASES */

/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 */
int
zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
	defined(SHRINK_CONTROL_HAS_NID) && \
	defined(SHRINKER_NUMA_AWARE)
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		*objects = 0;
		for_each_online_node(sc.nid)
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
	} else {
		*objects = (*shrinker->scan_objects)(shrinker, &sc);
	}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define	D_PRUNE_ALIASES_IS_DEFAULT
	*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif

#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef	D_PRUNE_ALIASES_IS_DEFAULT
	/*
	 * Fall back to zfs_sb_prune_aliases if the kernel's per-superblock
	 * shrinker couldn't free anything, possibly due to the inodes being
	 * allocated in a different memcg.
	 */
	if (*objects == 0)
		*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#endif

	dprintf_ds(zsb->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
EXPORT_SYMBOL(zfs_sb_prune);

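/*
 * zfs_sb_prune() is reached via the prune callback registered with
 * arc_add_prune_callback(zpl_prune_sb, sb) in zfs_domount() below, i.e. it
 * runs when the ARC needs filesystems to release pinned metadata.
 */
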
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
static int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t *zp;

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled; only then can it be safely destroyed.
	 */
	if (zsb->z_os) {
		/*
		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		 */
		int round = 0;
		while (zsb->z_nr_znodes > 0) {
			taskq_wait_outstanding(dsl_pool_iput_taskq(
			    dmu_objset_pool(zsb->z_os)), 0);
			if (++round > 1 && !unmounting)
				break;
		}
	}

	rrm_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
	}

	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}

	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrm_exit(&zsb->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		if (zp->z_sa_hdl)
			zfs_znode_dmu_fini(zp);
	}
	mutex_exit(&zsb->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrm_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
	    !zfs_is_readonly(zsb))
		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
EXPORT_SYMBOL(zfs_sb_teardown);

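/*
 * When called with unmounting == B_FALSE, zfs_sb_teardown() deliberately
 * returns with z_teardown_lock and z_teardown_inactive_lock still held; this
 * is the suspend half of the zfs_suspend_fs()/zfs_resume_fs() pair below,
 * which later reopens the objset and drops both locks.
 */
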
#if !defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER) && \
    !defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif

int
zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent)
{
	const char *osname = zmo->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, zmo, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);
	zsb->z_bdi.ra_pages = 0;
	sb->s_bdi = &zsb->z_bdi;

	error = -zpl_bdi_setup_and_register(&zsb->z_bdi, "zfs");
	if (error)
		goto out;

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
			goto out;
		acltype_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;
		zsb->z_snap_defer_time = jiffies;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	} else {
		error = zfs_sb_setup(zsb, B_TRUE);
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = SET_ERROR(ENOMEM);
		goto out;
	}

	zsb->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
	if (error) {
		dmu_objset_disown(zsb->z_os, zsb);
		zfs_sb_free(zsb);
	}

	return (error);
}
EXPORT_SYMBOL(zfs_domount);

/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	if (zsb)
		zfsctl_destroy(sb->s_fs_info);
}
EXPORT_SYMBOL(zfs_preumount);

/*
 * Called once all other unmount-related teardown has occurred.
 * It is our responsibility to release any remaining infrastructure.
 */
int
zfs_umount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	objset_t *os;

	arc_remove_prune_callback(zsb->z_arc_prune);
	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
	os = zsb->z_os;
	bdi_destroy(sb->s_bdi);

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zsb.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_disown(os, zsb);
	}

	zfs_sb_free(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_umount);

int
zfs_remount(struct super_block *sb, int *flags, zfs_mntopts_t *zmo)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int error;

	zfs_unregister_callbacks(zsb);
	error = zfs_register_callbacks(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_remount);

int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	znode_t *zp;
	uint64_t object = 0;
	uint64_t fid_gen = 0;
	uint64_t gen_mask;
	uint64_t zp_gen;
	int i, err;

	*ipp = NULL;

	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
		if (err)
			return (SET_ERROR(EINVAL));
	}

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		return (SET_ERROR(EINVAL));
	}

	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zsb->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);
		} else {
			igrab(*ipp);
		}
		return (0);
	}

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zsb, object, &zp))) {
		return (err);
	}

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if ((fid_gen == 0) && (zsb->z_root == object))
		fid_gen = zp_gen;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
		    fid_gen);
		iput(ZTOI(zp));
		return (SET_ERROR(EINVAL));
	}

	*ipp = ZTOI(zp);
	if (*ipp)
		zfs_inode_update(ITOZ(*ipp));

	return (0);
}
EXPORT_SYMBOL(zfs_vget);

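/*
 * NFS file handle layout assumed above (see sys/zfs_znode.h): a short FID
 * stores the object number and its generation as little-endian byte arrays
 * (zf_object, zf_gen); a long FID additionally carries the objset id and
 * objset generation (zf_setid, zf_setgen) so snapshot objsets can be located
 * via zfsctl_lookup_objset().
 */
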
/*
 * Block out VFS ops and close zfs_sb_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.  We leave ownership of the underlying
 * dataset and objset intact so that they can be atomically handed off during
 * a subsequent rollback or recv operation and the resume thereafter.
 */
int
zfs_suspend_fs(zfs_sb_t *zsb)
{
	int error;

	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
		return (error);

	return (0);
}
EXPORT_SYMBOL(zfs_suspend_fs);

/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;
	uint64_t sa_obj = 0;

	ASSERT(RRM_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	/*
	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	 */
	VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
	VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
	VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
	dmu_objset_rele(zsb->z_os, zsb);

	/*
	 * Make sure version hasn't changed
	 */
	err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
	    &zsb->z_version);
	if (err)
		goto bail;

	err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (err && zsb->z_version >= ZPL_VERSION_SA)
		goto bail;

	if ((err = sa_setup(zsb->z_os, sa_obj,
	    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
		goto bail;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(zsb->z_os,
		    zfs_sa_upgrade);

	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

	zfs_set_fuid_feature(zsb);
	zsb->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrm_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
EXPORT_SYMBOL(zfs_resume_fs);

int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
	int error;
	objset_t *os = zsb->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zsb->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zsb->z_os)))
		return (SET_ERROR(ENOTSUP));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);
	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
	    "from %llu to %llu", zsb->z_version, newvers);

	dmu_tx_commit(tx);

	zsb->z_version = newvers;

	zfs_set_fuid_feature(zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_set_version);

/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	const char *pname;
	int error = SET_ERROR(ENOENT);

	/*
	 * Look up the file system's value for the property.  For the
	 * version property, we look up a slightly different string.
	 */
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
	else
		pname = zfs_prop_to_name(prop);

	error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		case ZFS_PROP_ACLTYPE:
			*value = ZFS_ACLTYPE_OFF;
			break;
		default:
			return (error);
		}
		error = 0;
	}
	return (error);
}
EXPORT_SYMBOL(zfs_get_zplprop);

void
zfs_init(void)
{
	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
	register_filesystem(&zpl_fs_type);
}

void
zfs_fini(void)
{
	taskq_wait_outstanding(system_taskq, 0);
	unregister_filesystem(&zpl_fs_type);
}