/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/sa_impl.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/mkdev.h>
#include <sys/modctl.h>
#include <sys/refstr.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/bootconf.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include "zfs_comutil.h"
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	 */
	if (!wait)
		return (0);

	if (zsb != NULL) {
		/*
		 * Sync a specific filesystem.
		 */
		dsl_pool_t *dp;

		ZFS_ENTER(zsb);
		dp = dmu_objset_pool(zsb->z_os);

		/*
		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		 */
		if (spa_suspended(dp->dp_spa)) {
			ZFS_EXIT(zsb);
			return (0);
		}

		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, 0);

		ZFS_EXIT(zsb);
	} else {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		spa_sync_allpools();
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sync);
boolean_t
zfs_is_readonly(zfs_sb_t *zsb)
{
	return (!!(zsb->z_sb->s_flags & MS_RDONLY));
}
EXPORT_SYMBOL(zfs_is_readonly);

static void
atime_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_atime = newval;
}

static void
relatime_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_relatime = newval;
}

static void
xattr_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval == ZFS_XATTR_OFF) {
		zsb->z_flags &= ~ZSB_XATTR;
	} else {
		zsb->z_flags |= ZSB_XATTR;

		if (newval == ZFS_XATTR_SA)
			zsb->z_xattr_sa = B_TRUE;
		else
			zsb->z_xattr_sa = B_FALSE;
	}
}

static void
acltype_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	switch (newval) {
	case ZFS_ACLTYPE_OFF:
		zsb->z_acl_type = ZFS_ACLTYPE_OFF;
		zsb->z_sb->s_flags &= ~MS_POSIXACL;
		break;
	case ZFS_ACLTYPE_POSIXACL:
#ifdef CONFIG_FS_POSIX_ACL
		zsb->z_acl_type = ZFS_ACLTYPE_POSIXACL;
		zsb->z_sb->s_flags |= MS_POSIXACL;
#else
		zsb->z_acl_type = ZFS_ACLTYPE_OFF;
		zsb->z_sb->s_flags &= ~MS_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
		break;
	default:
		break;
	}
}

static void
blksz_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval < SPA_MINBLOCKSIZE ||
	    newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
		newval = SPA_MAXBLOCKSIZE;

	zsb->z_max_blksz = newval;
}

static void
readonly_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_RDONLY;
	else
		sb->s_flags &= ~MS_RDONLY;
}

static void
devices_changed_cb(void *arg, uint64_t newval)
{
}

static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}

static void
exec_changed_cb(void *arg, uint64_t newval)
{
}

static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_MANDLOCK;
	else
		sb->s_flags &= ~MS_MANDLOCK;
}

static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_show_ctldir = newval;
}

static void
vscan_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_vscan = newval;
}

static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_acl_inherit = newval;
}
int
zfs_register_callbacks(zfs_sb_t *zsb)
{
	struct dsl_dataset *ds = NULL;
	objset_t *os = zsb->z_os;
	boolean_t do_readonly = B_FALSE;
	int error = 0;

	if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os)))
		do_readonly = B_TRUE;

	/*
	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go
	 * overboard...
	 */
	ds = dmu_objset_ds(os);
	dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
	error = dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
	    zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zsb);
	dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
	if (error)
		goto unregister;

	if (do_readonly)
		readonly_changed_cb(zsb, B_TRUE);

	return (0);

unregister:
	/*
	 * We may attempt to unregister some callbacks that are not
	 * registered, but this is OK; it will simply return ENOMSG,
	 * which we will ignore.
	 */
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_ATIME),
	    atime_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RELATIME),
	    relatime_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
	    xattr_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
	    blksz_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
	    readonly_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_DEVICES),
	    devices_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_SETUID),
	    setuid_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_EXEC),
	    exec_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_SNAPDIR),
	    snapdir_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_ACLTYPE),
	    acltype_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_ACLINHERIT),
	    acl_inherit_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_VSCAN),
	    vscan_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_NBMAND),
	    nbmand_changed_cb, zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_register_callbacks);
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (SET_ERROR(ENOENT));

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids.
	 */
	if (data == NULL)
		return (SET_ERROR(EEXIST));

	if (bonustype == DMU_OT_ZNODE) {
		znode_phys_t *znp = data;
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
	} else {
		int hdrsize;
		sa_hdr_phys_t *sap = data;
		sa_hdr_phys_t sa = *sap;
		boolean_t swap = B_FALSE;

		ASSERT(bonustype == DMU_OT_SA);

		if (sa.sa_magic == 0) {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
			 * in yet.
			 */
			*userp = 0;
			*groupp = 0;
			return (0);
		}
		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
			sa.sa_magic = SA_MAGIC;
			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
			swap = B_TRUE;
		} else {
			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
		}

		hdrsize = sa_hdrsize(&sa);
		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_UID_OFFSET));
		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_GID_OFFSET));
		if (swap) {
			*userp = BSWAP_64(*userp);
			*groupp = BSWAP_64(*groupp);
		}
	}
	return (0);
}
static void
fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
	uint64_t fuid;
	const char *domain;

	fuid = strtonum(fuidstr, NULL);

	domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid));
	if (domain)
		(void) strlcpy(domainbuf, domain, buflen);
	else
		domainbuf[0] = '\0';
	*ridp = FUID_RID(fuid);
}
static int
zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
	switch (type) {
	case ZFS_PROP_USERUSED:
		return (DMU_USERUSED_OBJECT);
	case ZFS_PROP_GROUPUSED:
		return (DMU_GROUPUSED_OBJECT);
	case ZFS_PROP_USERQUOTA:
		return (zsb->z_userquota_obj);
	case ZFS_PROP_GROUPQUOTA:
		return (zsb->z_groupquota_obj);
	default:
		return (SET_ERROR(ENOTSUP));
	}
	return (0);
}
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0) {
		*bufsizep = 0;
		return (0);
	}

	for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		fuidstr_to_sid(zsb, za.za_name,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
EXPORT_SYMBOL(zfs_userspace_many);
/*
 * buf must be big enough (eg, 32 bytes)
 */
static int
id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
		if (domainid == -1)
			return (SET_ERROR(ENOENT));
	}
	fuid = FUID_ENCODE(domainid, rid);
	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	return (0);
}
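/*
 * Illustrative note (not taken from the original source): assuming
 * FUID_ENCODE() packs the domain table index into the upper 32 bits and the
 * RID into the lower 32 bits, a hypothetical domain index of 3 with RID 1001
 * would be formatted by id_to_fuidstr() as the ZAP key string "3000003e9",
 * while a plain POSIX id with no domain (index 0) formats as just the hex RID.
 */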
int
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[32];
	uint64_t obj;
	int err;

	*valp = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0)
		return (0);

	err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
EXPORT_SYMBOL(zfs_userspace_one);
int
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (SET_ERROR(EINVAL));

	if (zsb->z_version < ZPL_VERSION_USERSPACE)
		return (SET_ERROR(ENOTSUP));

	objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
	    &zsb->z_groupquota_obj;

	err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zsb->z_fuid_dirty;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zsb->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zsb->z_lock);

	if (quota == 0) {
		err = zap_remove(zsb->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);
	dmu_tx_commit(tx);
	return (err);
}
EXPORT_SYMBOL(zfs_set_userquota);
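/*
 * Usage sketch (hypothetical values, not part of the original file): setting
 * a user quota for UID 1000 with no SMB domain stores the 8-byte quota under
 * the ZAP key "3e8" in z_userquota_obj, while a quota of 0 removes the entry:
 *
 *	err = zfs_set_userquota(zsb, ZFS_PROP_USERQUOTA, NULL, 1000,
 *	    10737418240ULL);			(10 GiB limit)
 *	err = zfs_set_userquota(zsb, ZFS_PROP_USERQUOTA, NULL, 1000, 0);
 */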
boolean_t
zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid)
{
	char buf[32];
	uint64_t used, quota, usedobj, quotaobj;
	int err;

	usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
EXPORT_SYMBOL(zfs_fuid_overquota);
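/*
 * Behavioral note (summarizing the checks above): when no quota object
 * exists, when the fuid has no entry in the quota or used ZAPs, or while the
 * ZIL is being replayed, zfs_fuid_overquota() reports B_FALSE, i.e. writes
 * are never throttled by a quota that cannot be looked up.
 */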
boolean_t
zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup)
{
	uint64_t fuid;
	uint64_t quotaobj;

	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	fuid = isgroup ? zp->z_gid : zp->z_uid;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	return (zfs_fuid_overquota(zsb, isgroup, fuid));
}
EXPORT_SYMBOL(zfs_owner_overquota);
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	uint64_t sa_obj;
	int i, error;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_os = os;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version > ZPL_VERSION) {
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	zsb->z_hold_mtx = vmem_zalloc(sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ,
	    KM_SLEEP);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;

	vmem_free(zsb->z_hold_mtx, sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
EXPORT_SYMBOL(zfs_sb_create);
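/*
 * Lifecycle sketch (mirrors how zfs_domount() below drives this API; the
 * dataset name is hypothetical):
 *
 *	zfs_sb_t *zsb;
 *	error = zfs_sb_create("tank/fs", &zsb);
 *	if (error == 0)
 *		error = zfs_sb_setup(zsb, B_TRUE);
 *
 * On failure after creation the objset must be disowned and the zfs_sb_t
 * released with zfs_sb_free(), as zfs_domount() does in its error path.
 */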
int
zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
	int error;

	error = zfs_register_callbacks(zsb);
	if (error)
		return (error);

	/*
	 * Set the objset user_ptr to track its zsb.
	 */
	mutex_enter(&zsb->z_os->os_user_ptr_lock);
	dmu_objset_set_user(zsb->z_os, zsb);
	mutex_exit(&zsb->z_os->os_user_ptr_lock);

	zsb->z_log = zil_open(zsb->z_os, zfs_get_data);

	/*
	 * If we are not mounting (ie: online recv), then we don't
	 * have to worry about replaying the log as we blocked all
	 * operations out since we closed the ZIL.
	 */
	if (mounting) {
		boolean_t readonly;

		/*
		 * During replay we remove the read only flag to
		 * allow replays to succeed.
		 */
		readonly = zfs_is_readonly(zsb);
		if (readonly != 0)
			readonly_changed_cb(zsb, B_FALSE);
		else
			zfs_unlinked_drain(zsb);

		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest
		 * doesn't use readonly mounts, where
		 * zfs_unlinked_drain() isn't called.)  This is because
		 * ziltest causes spa_sync() to think it's committed,
		 * but actually it is not, so the intent log contains
		 * many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated
		 * in a yet later txg.  This would write a "create
		 * object N" record to the intent log.  Normally, this
		 * would be fine because the spa_sync() would have
		 * written out the fact that object N is free, before
		 * we could write the "create object N" intent log
		 * record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
			if (zil_replay_disable) {
				zil_destroy(zsb->z_log, B_FALSE);
			} else {
				zsb->z_replay = B_TRUE;
				zil_replay(zsb->z_os, zsb,
				    zfs_replay_vector);
				zsb->z_replay = B_FALSE;
			}
		}

		/* restore readonly bit */
		if (readonly != 0)
			readonly_changed_cb(zsb, B_TRUE);
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sb_setup);
void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i;

	zfs_fuid_destroy(zsb);

	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrw_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);
	vmem_free(zsb->z_hold_mtx, sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	mutex_destroy(&zsb->z_ctldir_lock);
	avl_destroy(&zsb->z_ctldir_snaps);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
EXPORT_SYMBOL(zfs_sb_free);
void
zfs_set_fuid_feature(zfs_sb_t *zsb)
{
	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
}
void
zfs_unregister_callbacks(zfs_sb_t *zsb)
{
	objset_t *os = zsb->z_os;
	struct dsl_dataset *ds;

	/*
	 * Unregister properties.
	 */
	if (!dmu_objset_is_snapshot(os)) {
		ds = dmu_objset_ds(os);
		VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "relatime", relatime_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "acltype", acltype_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "aclinherit",
		    acl_inherit_changed_cb, zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "vscan",
		    vscan_changed_cb, zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "nbmand",
		    nbmand_changed_cb, zsb) == 0);
	}
}
EXPORT_SYMBOL(zfs_unregister_callbacks);
#ifdef HAVE_MLSLABEL
/*
 * Check that the hex label string is appropriate for the dataset being
 * mounted into the global_zone proper.
 *
 * Return an error if the hex label string is not default or
 * admin_low/admin_high.  For admin_low labels, the corresponding
 * dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (SET_ERROR(EACCES));
		return (rdonly ? 0 : EACCES);
	}
	return (SET_ERROR(EACCES));
}
EXPORT_SYMBOL(zfs_check_global_label);
#endif /* HAVE_MLSLABEL */
int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint64_t fsid;
	uint32_t bshift;

	ZFS_ENTER(zsb);

	dmu_objset_space(zsb->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	fsid = dmu_objset_fsid_guid(zsb->z_os);
	/*
	 * The underlying storage pool actually uses multiple block
	 * sizes.  Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size.  Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably.  Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
	 */
	statp->f_frsize = zsb->z_max_blksz;
	statp->f_bsize = zsb->z_max_blksz;
	bshift = fls(statp->f_bsize) - 1;

	/*
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "preferred" size.
	 */

	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata.  ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects available
	 * and the number of blocks (each object will take at least a block).
	 */
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = (uint32_t)fsid;
	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = ZFS_MAXNAMELEN;

	/*
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	 */
	bzero(statp->f_spare, sizeof (statp->f_spare));

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_statvfs);
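/*
 * Worked example of the accounting above (illustrative numbers only): with
 * z_max_blksz of 131072, bshift is fls(131072) - 1 = 17, so a dataset with
 * refdbytes + availbytes of 1 GiB reports f_blocks = (1 << 30) >> 17 = 8192
 * nominal 128 KiB blocks, and f_ffree is capped at availbytes >> DNODE_SHIFT.
 */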
int
zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
	znode_t *rootzp;
	int error;

	ZFS_ENTER(zsb);

	error = zfs_zget(zsb, zsb->z_root, &rootzp);
	if (error == 0)
		*ipp = ZTOI(rootzp);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_root);
/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 */
int
zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

	ZFS_ENTER(zsb);

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#else
	/*
	 * Linux kernels older than 3.1 do not support a per-filesystem
	 * shrinker.  Therefore, we must fall back to the only available
	 * interface which is to discard all unused dentries and inodes.
	 * This behavior clearly isn't ideal but it's required so the ARC
	 * may free memory.  The performance impact is mitigated by the
	 * fact that the frequently accessed dentry and inode buffers will
	 * still be in the ARC making them relatively cheap to recreate.
	 */
	*objects = 0;
	shrink_dcache_parent(sb->s_root);
#endif

	ZFS_EXIT(zsb);

	dprintf_ds(zsb->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
EXPORT_SYMBOL(zfs_sb_prune);
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t *zp;

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled; only then can it be safely destroyed.
	 */
	if (zsb->z_os) {
		/*
		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		 */
		int round = 0;
		while (zsb->z_nr_znodes > 0) {
			taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(
			    zsb->z_os)));
			if (++round > 1 && !unmounting)
				break;
		}
	}

	rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
	}

	/*
	 * Close the zil.  NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}

	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		if (zp->z_sa_hdl)
			zfs_znode_dmu_fini(zp);
	}
	mutex_exit(&zsb->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	/*
	 * Evict cached data.
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
	    !zfs_is_readonly(zsb))
		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
EXPORT_SYMBOL(zfs_sb_teardown);
#if !defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER) && \
    !defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif

int
zfs_domount(struct super_block *sb, void *data, int silent)
{
	zpl_mount_data_t *zmd = data;
	const char *osname = zmd->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);
	zsb->z_bdi.ra_pages = 0;
	sb->s_bdi = &zsb->z_bdi;

	error = -zpl_bdi_setup_and_register(&zsb->z_bdi, "zfs");
	if (error)
		goto out;

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
			goto out;
		acltype_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	} else {
		error = zfs_sb_setup(zsb, B_TRUE);
		if (error)
			goto out;
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		return (error);
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = SET_ERROR(ENOMEM);
		return (error);
	}

	zsb->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);

	return (0);

out:
	dmu_objset_disown(zsb->z_os, zsb);
	zfs_sb_free(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_domount);
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	if (zsb != NULL && zsb->z_ctldir != NULL)
		zfsctl_destroy(zsb);
}
EXPORT_SYMBOL(zfs_preumount);
/*
 * Called once all other unmount-related teardown has occurred.
 * It is our responsibility to release any remaining infrastructure.
 */
int
zfs_umount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	objset_t *os;

	arc_remove_prune_callback(zsb->z_arc_prune);
	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
	os = zsb->z_os;
	bdi_destroy(sb->s_bdi);

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zsb.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_disown(os, zsb);
	}

	zfs_sb_free(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_umount);
int
zfs_remount(struct super_block *sb, int *flags, char *data)
{
	/*
	 * All namespace flags (MNT_*) and super block flags (MS_*) will
	 * be handled by the Linux VFS.  Only handle custom options here.
	 */
	return (0);
}
EXPORT_SYMBOL(zfs_remount);
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	znode_t *zp;
	uint64_t object = 0;
	uint64_t fid_gen = 0;
	uint64_t gen_mask;
	uint64_t zp_gen;
	int i, err;

	*ipp = NULL;

	ZFS_ENTER(zsb);

	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		ZFS_EXIT(zsb);

		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
		if (err)
			return (SET_ERROR(EINVAL));

		ZFS_ENTER(zsb);
	}

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zsb->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);
		} else {
			igrab(*ipp);
		}
		ZFS_EXIT(zsb);
		return (0);
	}

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zsb, object, &zp))) {
		ZFS_EXIT(zsb);
		return (err);
	}
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
		    fid_gen);
		iput(ZTOI(zp));
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	*ipp = ZTOI(zp);
	if (*ipp)
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_vget);
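/*
 * Decoding sketch for the little-endian fid fields above (hypothetical
 * bytes): a short fid whose zf_object array holds { 0x34, 0x12, 0, 0, 0, 0 }
 * reassembles to object number 0x1234, and because zf_gen holds four bytes
 * the loop leaves i == 4, giving gen_mask = -1ULL >> (64 - 32) = 0xffffffff.
 */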
/*
 * Block out VFS ops and close zfs_sb_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.  We leave ownership of the underlying
 * dataset and objset intact so that they can be atomically handed off during
 * a subsequent rollback or recv operation and the resume thereafter.
 */
int
zfs_suspend_fs(zfs_sb_t *zsb)
{
	int error;

	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
		return (error);

	return (0);
}
EXPORT_SYMBOL(zfs_suspend_fs);
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;
	uint64_t sa_obj = 0;

	ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	/*
	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	 */
	VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
	VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
	VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
	dmu_objset_rele(zsb->z_os, zsb);

	/*
	 * Make sure version hasn't changed
	 */
	err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
	    &zsb->z_version);
	if (err)
		goto bail;

	err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (err && zsb->z_version >= ZPL_VERSION_SA)
		goto bail;

	if ((err = sa_setup(zsb->z_os, sa_obj,
	    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
		goto bail;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(zsb->z_os,
		    zfs_sa_upgrade);

	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

	zfs_set_fuid_feature(zsb);
	zsb->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrw_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
EXPORT_SYMBOL(zfs_resume_fs);
int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
	int error;
	objset_t *os = zsb->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zsb->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zsb->z_os)))
		return (SET_ERROR(ENOTSUP));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);
	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
	    "from %llu to %llu", zsb->z_version, newvers);

	dmu_tx_commit(tx);

	zsb->z_version = newvers;

	zfs_set_fuid_feature(zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_set_version);
/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	const char *pname;
	int error = SET_ERROR(ENOENT);

	/*
	 * Look up the file system's value for the property.  For the
	 * version property, we look up a slightly different string.
	 */
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
	else
		pname = zfs_prop_to_name(prop);

	if (os != NULL)
		error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		case ZFS_PROP_ACLTYPE:
			*value = ZFS_ACLTYPE_OFF;
			break;
		default:
			return (error);
		}
		error = 0;
	}
	return (error);
}
EXPORT_SYMBOL(zfs_get_zplprop);
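/*
 * Example of the fallback behavior above: calling
 * zfs_get_zplprop(os, ZFS_PROP_VERSION, &val) on a master node with no
 * explicit version entry returns 0 and sets val to ZPL_VERSION, while a
 * property that has no default listed simply propagates the ENOENT error.
 */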
void
zfs_init(void)
{
	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
	register_filesystem(&zpl_fs_type);
}

void
zfs_fini(void)
{
	taskq_wait(system_taskq);
	unregister_filesystem(&zpl_fs_type);
}