/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include "zfs_comutil.h"
static const match_table_t zpl_tokens = {
	{ TOKEN_RO,		MNTOPT_RO },
	{ TOKEN_RW,		MNTOPT_RW },
	{ TOKEN_SETUID,		MNTOPT_SETUID },
	{ TOKEN_NOSETUID,	MNTOPT_NOSETUID },
	{ TOKEN_EXEC,		MNTOPT_EXEC },
	{ TOKEN_NOEXEC,		MNTOPT_NOEXEC },
	{ TOKEN_DEVICES,	MNTOPT_DEVICES },
	{ TOKEN_NODEVICES,	MNTOPT_NODEVICES },
	{ TOKEN_DIRXATTR,	MNTOPT_DIRXATTR },
	{ TOKEN_SAXATTR,	MNTOPT_SAXATTR },
	{ TOKEN_XATTR,		MNTOPT_XATTR },
	{ TOKEN_NOXATTR,	MNTOPT_NOXATTR },
	{ TOKEN_ATIME,		MNTOPT_ATIME },
	{ TOKEN_NOATIME,	MNTOPT_NOATIME },
	{ TOKEN_RELATIME,	MNTOPT_RELATIME },
	{ TOKEN_NORELATIME,	MNTOPT_NORELATIME },
	{ TOKEN_NBMAND,		MNTOPT_NBMAND },
	{ TOKEN_NONBMAND,	MNTOPT_NONBMAND },
	{ TOKEN_MNTPOINT,	MNTOPT_MNTPOINT "=%s" },
	{ TOKEN_LAST,		NULL },
};
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
	if (vfsp != NULL) {
		if (vfsp->vfs_mntpoint != NULL)
			strfree(vfsp->vfs_mntpoint);

		kmem_free(vfsp, sizeof (vfs_t));
	}
}
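
/*
 * Apply a single parsed mount-option token to the temporary vfs_t.  Each
 * recognized option records both the requested value and a matching
 * vfs_do_* flag so that zfs_register_callbacks() can re-apply the
 * override after the property callbacks reset the defaults.
 */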
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
	switch (token) {
	case TOKEN_RO:
		vfsp->vfs_readonly = B_TRUE;
		vfsp->vfs_do_readonly = B_TRUE;
		break;
	case TOKEN_RW:
		vfsp->vfs_readonly = B_FALSE;
		vfsp->vfs_do_readonly = B_TRUE;
		break;
	case TOKEN_SETUID:
		vfsp->vfs_setuid = B_TRUE;
		vfsp->vfs_do_setuid = B_TRUE;
		break;
	case TOKEN_NOSETUID:
		vfsp->vfs_setuid = B_FALSE;
		vfsp->vfs_do_setuid = B_TRUE;
		break;
	case TOKEN_EXEC:
		vfsp->vfs_exec = B_TRUE;
		vfsp->vfs_do_exec = B_TRUE;
		break;
	case TOKEN_NOEXEC:
		vfsp->vfs_exec = B_FALSE;
		vfsp->vfs_do_exec = B_TRUE;
		break;
	case TOKEN_DEVICES:
		vfsp->vfs_devices = B_TRUE;
		vfsp->vfs_do_devices = B_TRUE;
		break;
	case TOKEN_NODEVICES:
		vfsp->vfs_devices = B_FALSE;
		vfsp->vfs_do_devices = B_TRUE;
		break;
	case TOKEN_DIRXATTR:
		vfsp->vfs_xattr = ZFS_XATTR_DIR;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_SAXATTR:
		vfsp->vfs_xattr = ZFS_XATTR_SA;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_XATTR:
		vfsp->vfs_xattr = ZFS_XATTR_DIR;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_NOXATTR:
		vfsp->vfs_xattr = ZFS_XATTR_OFF;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_ATIME:
		vfsp->vfs_atime = B_TRUE;
		vfsp->vfs_do_atime = B_TRUE;
		break;
	case TOKEN_NOATIME:
		vfsp->vfs_atime = B_FALSE;
		vfsp->vfs_do_atime = B_TRUE;
		break;
	case TOKEN_RELATIME:
		vfsp->vfs_relatime = B_TRUE;
		vfsp->vfs_do_relatime = B_TRUE;
		break;
	case TOKEN_NORELATIME:
		vfsp->vfs_relatime = B_FALSE;
		vfsp->vfs_do_relatime = B_TRUE;
		break;
	case TOKEN_NBMAND:
		vfsp->vfs_nbmand = B_TRUE;
		vfsp->vfs_do_nbmand = B_TRUE;
		break;
	case TOKEN_NONBMAND:
		vfsp->vfs_nbmand = B_FALSE;
		vfsp->vfs_do_nbmand = B_TRUE;
		break;
	case TOKEN_MNTPOINT:
		vfsp->vfs_mntpoint = match_strdup(&args[0]);
		if (vfsp->vfs_mntpoint == NULL)
			return (SET_ERROR(ENOMEM));
		break;
	default:
		break;
	}

	return (0);
}
/*
 * Parse the raw mntopts and return a vfs_t describing the options.
 */
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
	vfs_t *tmp_vfsp;
	int error;

	tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);

	if (mntopts != NULL) {
		substring_t args[MAX_OPT_ARGS];
		char *tmp_mntopts, *p, *t;
		int token;

		tmp_mntopts = t = strdup(mntopts);
		if (tmp_mntopts == NULL)
			return (SET_ERROR(ENOMEM));

		while ((p = strsep(&t, ",")) != NULL) {
			if (!*p)
				continue;

			args[0].to = args[0].from = NULL;
			token = match_token(p, zpl_tokens, args);
			error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
			if (error) {
				strfree(tmp_mntopts);
				zfsvfs_vfs_free(tmp_vfsp);
				return (error);
			}
		}

		strfree(tmp_mntopts);
	}

	*vfsp = tmp_vfsp;

	return (0);
}
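
/*
 * Return B_TRUE if the filesystem's superblock is currently flagged
 * read-only (this checks the Linux MS_RDONLY flag, not the dataset's
 * readonly property directly).
 */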
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
	return (!!(zfsvfs->z_sb->s_flags & MS_RDONLY));
}
/*ARGSUSED*/
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	 */
	if (!wait)
		return (0);

	if (zfsvfs != NULL) {
		/*
		 * Sync a specific filesystem.
		 */
		dsl_pool_t *dp;

		ZFS_ENTER(zfsvfs);
		dp = dmu_objset_pool(zfsvfs->z_os);

		/*
		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		 */
		if (spa_suspended(dp->dp_spa)) {
			ZFS_EXIT(zfsvfs);
			return (0);
		}

		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, 0);

		ZFS_EXIT(zfsvfs);
	} else {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		spa_sync_allpools();
	}

	return (0);
}
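
/*
 * Property change callbacks.  Each *_changed_cb below is registered with
 * dsl_prop_register() in zfs_register_callbacks() and is invoked whenever
 * the corresponding dataset property changes, keeping the cached values in
 * the zfsvfs_t (and the Linux superblock flags) in sync.
 */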
static void
atime_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_atime = newval;
}

static void
relatime_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_relatime = newval;
}

static void
xattr_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;

	if (newval == ZFS_XATTR_OFF) {
		zfsvfs->z_flags &= ~ZSB_XATTR;
	} else {
		zfsvfs->z_flags |= ZSB_XATTR;

		if (newval == ZFS_XATTR_SA)
			zfsvfs->z_xattr_sa = B_TRUE;
		else
			zfsvfs->z_xattr_sa = B_FALSE;
	}
}

static void
acltype_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;

	switch (newval) {
	case ZFS_ACLTYPE_OFF:
		zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
		zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
		break;
	case ZFS_ACLTYPE_POSIXACL:
#ifdef CONFIG_FS_POSIX_ACL
		zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIXACL;
		zfsvfs->z_sb->s_flags |= MS_POSIXACL;
#else
		zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
		zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
		break;
	default:
		break;
	}
}

static void
blksz_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;

	ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
	ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
	ASSERT(ISP2(newval));

	zfsvfs->z_max_blksz = newval;
}

static void
readonly_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_RDONLY;
	else
		sb->s_flags &= ~MS_RDONLY;
}

static void
devices_changed_cb(void *arg, uint64_t newval)
{
}

static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}

static void
exec_changed_cb(void *arg, uint64_t newval)
{
}

static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_MANDLOCK;
	else
		sb->s_flags &= ~MS_MANDLOCK;
}

static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_show_ctldir = newval;
}

static void
vscan_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_vscan = newval;
}

static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
	struct dsl_dataset *ds = NULL;
	objset_t *os = NULL;
	zfsvfs_t *zfsvfs = NULL;
	int error = 0;

	ASSERT(vfsp);
	zfsvfs = vfsp->vfs_data;
	ASSERT(zfsvfs);
	os = zfsvfs->z_os;

	/*
	 * The act of registering our callbacks will destroy any mount
	 * options we may have.  In order to enable temporary overrides
	 * of mount options, we stash away the current values and
	 * restore them after we register the callbacks.
	 */
	if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
		vfsp->vfs_do_readonly = B_TRUE;
		vfsp->vfs_readonly = B_TRUE;
	}

	/*
	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go
	 * overboard...
	 */
	ds = dmu_objset_ds(os);
	dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
	error = dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
	    zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
	dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
	if (error)
		goto unregister;

	/*
	 * Invoke our callbacks to restore temporary mount options.
	 */
	if (vfsp->vfs_do_readonly)
		readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
	if (vfsp->vfs_do_setuid)
		setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
	if (vfsp->vfs_do_exec)
		exec_changed_cb(zfsvfs, vfsp->vfs_exec);
	if (vfsp->vfs_do_devices)
		devices_changed_cb(zfsvfs, vfsp->vfs_devices);
	if (vfsp->vfs_do_xattr)
		xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
	if (vfsp->vfs_do_atime)
		atime_changed_cb(zfsvfs, vfsp->vfs_atime);
	if (vfsp->vfs_do_relatime)
		relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
	if (vfsp->vfs_do_nbmand)
		nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);

	return (0);

unregister:
	dsl_prop_unregister_all(ds, zfsvfs);
	return (error);
}
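
/*
 * Registered with dmu_objset_register_type() in zfs_init().  Given an
 * object's bonus buffer, report the user, group, and project IDs that the
 * object's space should be charged to for quota accounting.
 */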
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp, uint64_t *projectp)
{
	sa_hdr_phys_t sa;
	sa_hdr_phys_t *sap = data;
	uint64_t flags;
	int hdrsize;
	boolean_t swap = B_FALSE;

	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (SET_ERROR(ENOENT));

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids.
	 */
	if (data == NULL)
		return (SET_ERROR(EEXIST));

	if (bonustype == DMU_OT_ZNODE) {
		znode_phys_t *znp = data;
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
		*projectp = ZFS_DEFAULT_PROJID;
		return (0);
	}

	if (sap->sa_magic == 0) {
		/*
		 * This should only happen for newly created files
		 * that haven't had the znode data filled in yet.
		 */
		*userp = 0;
		*groupp = 0;
		*projectp = ZFS_DEFAULT_PROJID;
		return (0);
	}

	sa = *sap;
	if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
		sa.sa_magic = SA_MAGIC;
		sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
		swap = B_TRUE;
	} else {
		VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
	}

	hdrsize = sa_hdrsize(&sa);
	VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));

	*userp = *((uint64_t *)((uintptr_t)data + hdrsize + SA_UID_OFFSET));
	*groupp = *((uint64_t *)((uintptr_t)data + hdrsize + SA_GID_OFFSET));
	flags = *((uint64_t *)((uintptr_t)data + hdrsize + SA_FLAGS_OFFSET));
	if (swap)
		flags = BSWAP_64(flags);

	if (flags & ZFS_PROJID)
		*projectp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_PROJID_OFFSET));
	else
		*projectp = ZFS_DEFAULT_PROJID;

	if (swap) {
		*userp = BSWAP_64(*userp);
		*groupp = BSWAP_64(*groupp);
		*projectp = BSWAP_64(*projectp);
	}
	return (0);
}
static void
fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
	uint64_t fuid;
	const char *domain;

	fuid = zfs_strtonum(fuidstr, NULL);

	domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid));
	if (domain)
		(void) strlcpy(domainbuf, domain, buflen);
	else
		domainbuf[0] = '\0';
	*ridp = FUID_RID(fuid);
}
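
/*
 * Map a user/group/project quota or usage property to the object holding
 * its data: usage properties map to the DMU accounting objects, quota
 * properties map to the per-filesystem quota ZAP objects (0 if the quota
 * has never been set), and anything else yields ZFS_NO_OBJECT.
 */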
static uint64_t
zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
{
	switch (type) {
	case ZFS_PROP_USERUSED:
	case ZFS_PROP_USEROBJUSED:
		return (DMU_USERUSED_OBJECT);
	case ZFS_PROP_GROUPUSED:
	case ZFS_PROP_GROUPOBJUSED:
		return (DMU_GROUPUSED_OBJECT);
	case ZFS_PROP_PROJECTUSED:
	case ZFS_PROP_PROJECTOBJUSED:
		return (DMU_PROJECTUSED_OBJECT);
	case ZFS_PROP_USERQUOTA:
		return (zfsvfs->z_userquota_obj);
	case ZFS_PROP_GROUPQUOTA:
		return (zfsvfs->z_groupquota_obj);
	case ZFS_PROP_USEROBJQUOTA:
		return (zfsvfs->z_userobjquota_obj);
	case ZFS_PROP_GROUPOBJQUOTA:
		return (zfsvfs->z_groupobjquota_obj);
	case ZFS_PROP_PROJECTQUOTA:
		return (zfsvfs->z_projectquota_obj);
	case ZFS_PROP_PROJECTOBJQUOTA:
		return (zfsvfs->z_projectobjquota_obj);
	default:
		return (ZFS_NO_OBJECT);
	}
}
int
zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;
	int offset = 0;

	if (!dmu_objset_userspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_PROJECTQUOTA || type == ZFS_PROP_PROJECTUSED ||
	    type == ZFS_PROP_PROJECTOBJQUOTA ||
	    type == ZFS_PROP_PROJECTOBJUSED) &&
	    !dmu_objset_projectquota_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
	    type == ZFS_PROP_PROJECTOBJUSED ||
	    type == ZFS_PROP_PROJECTOBJQUOTA) &&
	    !dmu_objset_userobjspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zfsvfs, type);
	if (obj == ZFS_NO_OBJECT) {
		*bufsizep = 0;
		return (0);
	}

	if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_PROJECTOBJUSED)
		offset = DMU_OBJACCT_PREFIX_LEN;

	for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		/*
		 * skip object quota (with zap name prefix DMU_OBJACCT_PREFIX)
		 * when dealing with block quota and vice versa.
		 */
		if ((offset > 0) != (strncmp(za.za_name, DMU_OBJACCT_PREFIX,
		    DMU_OBJACCT_PREFIX_LEN) == 0))
			continue;

		fuidstr_to_sid(zfsvfs, za.za_name + offset,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
/*
 * buf must be big enough (eg, 32 bytes)
 */
static int
id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
		if (domainid == -1)
			return (SET_ERROR(ENOENT));
	}
	fuid = FUID_ENCODE(domainid, rid);
	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	return (0);
}
int
zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	int offset = 0;
	int err;
	uint64_t obj;

	*valp = 0;

	if (!dmu_objset_userspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
	    type == ZFS_PROP_PROJECTOBJUSED ||
	    type == ZFS_PROP_PROJECTOBJQUOTA) &&
	    !dmu_objset_userobjspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	if (type == ZFS_PROP_PROJECTQUOTA || type == ZFS_PROP_PROJECTUSED ||
	    type == ZFS_PROP_PROJECTOBJQUOTA ||
	    type == ZFS_PROP_PROJECTOBJUSED) {
		if (!dmu_objset_projectquota_present(zfsvfs->z_os))
			return (SET_ERROR(ENOTSUP));
		if (!zpl_is_valid_projid(rid))
			return (SET_ERROR(EINVAL));
	}

	obj = zfs_userquota_prop_to_obj(zfsvfs, type);
	if (obj == ZFS_NO_OBJECT)
		return (0);

	if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_PROJECTOBJUSED) {
		strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
		offset = DMU_OBJACCT_PREFIX_LEN;
	}

	err = id_to_fuidstr(zfsvfs, domain, rid, buf + offset, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
int
zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
		return (SET_ERROR(ENOTSUP));

	switch (type) {
	case ZFS_PROP_USERQUOTA:
		objp = &zfsvfs->z_userquota_obj;
		break;
	case ZFS_PROP_GROUPQUOTA:
		objp = &zfsvfs->z_groupquota_obj;
		break;
	case ZFS_PROP_USEROBJQUOTA:
		objp = &zfsvfs->z_userobjquota_obj;
		break;
	case ZFS_PROP_GROUPOBJQUOTA:
		objp = &zfsvfs->z_groupobjquota_obj;
		break;
	case ZFS_PROP_PROJECTQUOTA:
		if (!dmu_objset_projectquota_enabled(zfsvfs->z_os))
			return (SET_ERROR(ENOTSUP));
		if (!zpl_is_valid_projid(rid))
			return (SET_ERROR(EINVAL));

		objp = &zfsvfs->z_projectquota_obj;
		break;
	case ZFS_PROP_PROJECTOBJQUOTA:
		if (!dmu_objset_projectquota_enabled(zfsvfs->z_os))
			return (SET_ERROR(ENOTSUP));
		if (!zpl_is_valid_projid(rid))
			return (SET_ERROR(EINVAL));

		objp = &zfsvfs->z_projectobjquota_obj;
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zfsvfs->z_fuid_dirty;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zfsvfs->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zfsvfs->z_lock);

	if (quota == 0) {
		err = zap_remove(zfsvfs->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);
	dmu_tx_commit(tx);
	return (err);
}
boolean_t
zfs_id_overobjquota(zfsvfs_t *zfsvfs, uint64_t usedobj, uint64_t id)
{
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	uint64_t used, quota, quotaobj;
	int err;

	if (!dmu_objset_userobjspace_present(zfsvfs->z_os)) {
		if (dmu_objset_userobjspace_upgradable(zfsvfs->z_os)) {
			dsl_pool_config_enter(
			    dmu_objset_pool(zfsvfs->z_os), FTAG);
			dmu_objset_id_quota_upgrade(zfsvfs->z_os);
			dsl_pool_config_exit(
			    dmu_objset_pool(zfsvfs->z_os), FTAG);
		}
		return (B_FALSE);
	}

	if (usedobj == DMU_PROJECTUSED_OBJECT) {
		if (!dmu_objset_projectquota_present(zfsvfs->z_os)) {
			if (dmu_objset_projectquota_upgradable(zfsvfs->z_os)) {
				dsl_pool_config_enter(
				    dmu_objset_pool(zfsvfs->z_os), FTAG);
				dmu_objset_id_quota_upgrade(zfsvfs->z_os);
				dsl_pool_config_exit(
				    dmu_objset_pool(zfsvfs->z_os), FTAG);
			}
			return (B_FALSE);
		}
		quotaobj = zfsvfs->z_projectobjquota_obj;
	} else if (usedobj == DMU_USERUSED_OBJECT) {
		quotaobj = zfsvfs->z_userobjquota_obj;
	} else if (usedobj == DMU_GROUPUSED_OBJECT) {
		quotaobj = zfsvfs->z_groupobjquota_obj;
	} else {
		return (B_FALSE);
	}
	if (quotaobj == 0 || zfsvfs->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)id);
	err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	(void) sprintf(buf, DMU_OBJACCT_PREFIX "%llx", (longlong_t)id);
	err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
boolean_t
zfs_id_overblockquota(zfsvfs_t *zfsvfs, uint64_t usedobj, uint64_t id)
{
	char buf[20];
	uint64_t used, quota, quotaobj;
	int err;

	if (usedobj == DMU_PROJECTUSED_OBJECT) {
		if (!dmu_objset_projectquota_present(zfsvfs->z_os)) {
			if (dmu_objset_projectquota_upgradable(zfsvfs->z_os)) {
				dsl_pool_config_enter(
				    dmu_objset_pool(zfsvfs->z_os), FTAG);
				dmu_objset_id_quota_upgrade(zfsvfs->z_os);
				dsl_pool_config_exit(
				    dmu_objset_pool(zfsvfs->z_os), FTAG);
			}
			return (B_FALSE);
		}
		quotaobj = zfsvfs->z_projectquota_obj;
	} else if (usedobj == DMU_USERUSED_OBJECT) {
		quotaobj = zfsvfs->z_userquota_obj;
	} else if (usedobj == DMU_GROUPUSED_OBJECT) {
		quotaobj = zfsvfs->z_groupquota_obj;
	} else {
		return (B_FALSE);
	}
	if (quotaobj == 0 || zfsvfs->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)id);
	err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}

boolean_t
zfs_id_overquota(zfsvfs_t *zfsvfs, uint64_t usedobj, uint64_t id)
{
	return (zfs_id_overblockquota(zfsvfs, usedobj, id) ||
	    zfs_id_overobjquota(zfsvfs, usedobj, id));
}
/*
 * Associate this zfsvfs with the given objset, which must be owned.
 * This will cache a bunch of on-disk state from the objset in the
 * zfsvfs.
 */
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
	int error;
	uint64_t val;

	zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zfsvfs->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
	if (error != 0)
		return (error);
	if (zfsvfs->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool. Pool must be upgraded to mount "
		    "this file system.\n", (u_longlong_t)zfsvfs->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		return (SET_ERROR(ENOTSUP));
	}
	error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
	if (error != 0)
		return (error);
	zfsvfs->z_norm = (int)val;

	error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
	if (error != 0)
		return (error);
	zfsvfs->z_utf8 = (val != 0);

	error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
	if (error != 0)
		return (error);
	zfsvfs->z_case = (uint_t)val;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
		return (error);
	zfsvfs->z_acl_type = (uint_t)val;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
	    zfsvfs->z_case == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);

	uint64_t sa_obj = 0;
	if (zfsvfs->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error != 0)
			return (error);

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
		if ((error == 0) && (val == ZFS_XATTR_SA))
			zfsvfs->z_xattr_sa = B_TRUE;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);
	if (error != 0)
		return (error);

	if (zfsvfs->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zfsvfs->z_root);
	if (error != 0)
		return (error);
	ASSERT(zfsvfs->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zfsvfs->z_unlinkedobj);
	if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zfsvfs->z_userquota_obj);
	if (error == ENOENT)
		zfsvfs->z_userquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zfsvfs->z_groupquota_obj);
	if (error == ENOENT)
		zfsvfs->z_groupquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
	    8, 1, &zfsvfs->z_projectquota_obj);
	if (error == ENOENT)
		zfsvfs->z_projectquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
	    8, 1, &zfsvfs->z_userobjquota_obj);
	if (error == ENOENT)
		zfsvfs->z_userobjquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
	    8, 1, &zfsvfs->z_groupobjquota_obj);
	if (error == ENOENT)
		zfsvfs->z_groupobjquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
	    8, 1, &zfsvfs->z_projectobjquota_obj);
	if (error == ENOENT)
		zfsvfs->z_projectobjquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zfsvfs->z_fuid_obj);
	if (error == ENOENT)
		zfsvfs->z_fuid_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zfsvfs->z_shares_dir);
	if (error == ENOENT)
		zfsvfs->z_shares_dir = 0;
	else if (error != 0)
		return (error);

	return (0);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
	objset_t *os;
	zfsvfs_t *zfsvfs;
	int error;
	boolean_t ro = (readonly || (strchr(osname, '@') != NULL));

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

	error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
	if (error != 0) {
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
		return (error);
	}

	error = zfsvfs_create_impl(zfvp, zfsvfs, os);
	if (error != 0)
		dmu_objset_disown(os, B_TRUE, zfsvfs);

	return (error);
}

int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
	int error;

	zfsvfs->z_vfs = NULL;
	zfsvfs->z_sb = NULL;
	zfsvfs->z_parent = zfsvfs;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zfsvfs->z_teardown_lock, B_FALSE);
	rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
	    ZFS_OBJ_MTX_MAX);
	zfsvfs->z_hold_size = size;
	zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
	    KM_SLEEP);
	zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (int i = 0; i != size; i++) {
		avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
	}

	error = zfsvfs_init(zfsvfs, os);
	if (error != 0) {
		*zfvp = NULL;
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
		return (error);
	}

	*zfvp = zfsvfs;
	return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
	int error;
	boolean_t readonly = zfs_is_readonly(zfsvfs);

	error = zfs_register_callbacks(zfsvfs->z_vfs);
	if (error)
		return (error);

	zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);

	/*
	 * If we are not mounting (ie: online recv), then we don't
	 * have to worry about replaying the log as we blocked all
	 * operations out since we closed the ZIL.
	 */
	if (mounting) {
		/*
		 * During replay we remove the read only flag to
		 * allow replays to succeed.
		 */
		if (readonly != 0)
			readonly_changed_cb(zfsvfs, B_FALSE);
		else
			zfs_unlinked_drain(zfsvfs);

		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain(). (Further note: ziltest
		 * doesn't use readonly mounts, where
		 * zfs_unlinked_drain() isn't called.) This is because
		 * ziltest causes spa_sync() to think it's committed,
		 * but actually it is not, so the intent log contains
		 * many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated
		 * in a yet later txg.  This would write a "create
		 * object N" record to the intent log.  Normally, this
		 * would be fine because the spa_sync() would have
		 * written out the fact that object N is free, before
		 * we could write the "create object N" intent log
		 * record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
			if (zil_replay_disable) {
				zil_destroy(zfsvfs->z_log, B_FALSE);
			} else {
				zfsvfs->z_replay = B_TRUE;
				zil_replay(zfsvfs->z_os, zfsvfs,
				    zfs_replay_vector);
				zfsvfs->z_replay = B_FALSE;
			}
		}

		/* restore readonly bit */
		if (readonly != 0)
			readonly_changed_cb(zfsvfs, B_TRUE);
	}

	/*
	 * Set the objset user_ptr to track its zfsvfs.
	 */
	mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
	dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
	mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);

	return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
	int i, size = zfsvfs->z_hold_size;

	zfs_fuid_destroy(zfsvfs);

	mutex_destroy(&zfsvfs->z_znodes_lock);
	mutex_destroy(&zfsvfs->z_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rrm_destroy(&zfsvfs->z_teardown_lock);
	rw_destroy(&zfsvfs->z_teardown_inactive_lock);
	rw_destroy(&zfsvfs->z_fuid_lock);
	for (i = 0; i != size; i++) {
		avl_destroy(&zfsvfs->z_hold_trees[i]);
		mutex_destroy(&zfsvfs->z_hold_locks[i]);
	}
	vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
	zfsvfs_vfs_free(zfsvfs->z_vfs);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}

static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
	objset_t *os = zfsvfs->z_os;

	if (!dmu_objset_is_snapshot(os))
		dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}

#ifdef HAVE_MLSLABEL
/*
 * Check that the hex label string is appropriate for the dataset being
 * mounted into the global_zone proper.
 *
 * Return an error if the hex label string is not default or
 * admin_low/admin_high.  For admin_low labels, the corresponding
 * dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (SET_ERROR(EACCES));
		return (rdonly ? 0 : EACCES);
	}
	return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
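
/*
 * When a project quota applies to the file being queried, clamp the
 * statfs block and inode totals so the reported capacity and free space
 * reflect the project quota rather than the whole filesystem.
 */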
static int
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
    uint32_t bshift)
{
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
	uint64_t quota;
	uint64_t used;
	int err;

	strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
	err = id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset, B_FALSE);
	if (err)
		return (err);

	if (zfsvfs->z_projectquota_obj == 0)
		goto objs;

	err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
	    buf + offset, 8, 1, &quota);
	if (err == ENOENT)
		goto objs;
	else if (err)
		return (err);

	err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
	    buf + offset, 8, 1, &used);
	if (unlikely(err == ENOENT)) {
		uint32_t blksize;
		u_longlong_t nblocks;

		/*
		 * Quota accounting is async, so a race is possible here:
		 * there is at least one object with the given project ID.
		 */
		sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
		if (unlikely(zp->z_blksz == 0))
			blksize = zfsvfs->z_max_blksz;

		used = blksize * nblocks;
	} else if (err) {
		return (err);
	}

	statp->f_blocks = quota >> bshift;
	statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
	statp->f_bavail = statp->f_bfree;

objs:
	if (zfsvfs->z_projectobjquota_obj == 0)
		return (0);

	err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
	    buf + offset, 8, 1, &quota);
	if (err == ENOENT)
		return (0);
	else if (err)
		return (err);

	err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
	    buf, 8, 1, &used);
	if (unlikely(err == ENOENT)) {
		/*
		 * Quota accounting is async, so a race is possible here:
		 * there is at least one object with the given project ID.
		 */
		used = 1;
	} else if (err) {
		return (err);
	}

	statp->f_files = quota;
	statp->f_ffree = (quota > used) ? (quota - used) : 0;

	return (0);
}
int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
	zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	int err = 0;
	uint64_t fsid;
	uint32_t bshift;

	ZFS_ENTER(zfsvfs);

	dmu_objset_space(zfsvfs->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
	/*
	 * The underlying storage pool actually uses multiple block
	 * sizes.  Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size.  Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably.  Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
	 */
	statp->f_frsize = zfsvfs->z_max_blksz;
	statp->f_bsize = zfsvfs->z_max_blksz;
	bshift = fls(statp->f_bsize) - 1;

	/*
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "preferred" size.
	 */
	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata.  ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects available
	 * and the number of blocks (each object will take at least a block).
	 */
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = (uint32_t)fsid;
	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	/*
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	 */
	bzero(statp->f_spare, sizeof (statp->f_spare));

	if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
	    dmu_objset_projectquota_present(zfsvfs->z_os)) {
		znode_t *zp = ITOZ(dentry->d_inode);

		if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
		    zpl_is_valid_projid(zp->z_projid))
			err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
	}

	ZFS_EXIT(zfsvfs);
	return (err);
}
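
/*
 * Look up the root znode of the filesystem and return its inode, which
 * the VFS uses to instantiate the root dentry at mount time.
 */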
int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
	znode_t *rootzp;
	int error;

	ZFS_ENTER(zfsvfs);

	error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
	if (error == 0)
		*ipp = ZTOI(rootzp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#ifdef HAVE_D_PRUNE_ALIASES
/*
 * Linux kernels older than 3.1 do not support a per-filesystem shrinker.
 * To accommodate this we must improvise and manually walk the list of znodes
 * attempting to prune dentries in order to be able to drop the inodes.
 *
 * To avoid scanning the same znodes multiple times they are always rotated
 * to the end of the z_all_znodes list.  New znodes are inserted at the
 * end of the list so we're always scanning the oldest znodes first.
 */
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
	znode_t **zp_array, *zp;
	int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
	int objects = 0;
	int i = 0, j = 0;

	zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);

	mutex_enter(&zfsvfs->z_znodes_lock);
	while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {

		if ((i++ > nr_to_scan) || (j >= max_array))
			break;

		ASSERT(list_link_active(&zp->z_link_node));
		list_remove(&zfsvfs->z_all_znodes, zp);
		list_insert_tail(&zfsvfs->z_all_znodes, zp);

		/* Skip active znodes and .zfs entries */
		if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
			continue;

		if (igrab(ZTOI(zp)) == NULL)
			continue;

		zp_array[j] = zp;
		j++;
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	for (i = 0; i < j; i++) {
		zp = zp_array[i];

		ASSERT3P(zp, !=, NULL);
		d_prune_aliases(ZTOI(zp));

		if (atomic_read(&ZTOI(zp)->i_count) == 1)
			objects++;

		iput(ZTOI(zp));
	}

	kmem_free(zp_array, max_array * sizeof (znode_t *));

	return (objects);
}
#endif /* HAVE_D_PRUNE_ALIASES */
/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 */
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

	ZFS_ENTER(zfsvfs);

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
	defined(SHRINK_CONTROL_HAS_NID) && \
	defined(SHRINKER_NUMA_AWARE)
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		*objects = 0;
		for_each_online_node(sc.nid) {
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
		}
	} else {
		*objects = (*shrinker->scan_objects)(shrinker, &sc);
	}

#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define	D_PRUNE_ALIASES_IS_DEFAULT
	*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif

#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef	D_PRUNE_ALIASES_IS_DEFAULT
	/*
	 * Fall back to zfs_prune_aliases if the kernel's per-superblock
	 * shrinker couldn't free anything, possibly due to the inodes being
	 * allocated in a different memcg.
	 */
	if (*objects == 0)
		*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif

	ZFS_EXIT(zfsvfs);

	dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
/*
 * Teardown the zfsvfs_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
	znode_t *zp;

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfsvfs_t have been handled; only then can it be safely destroyed.
	 */
	if (zfsvfs->z_os) {
		/*
		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		 */
		int round = 0;
		while (zfsvfs->z_nr_znodes > 0) {
			taskq_wait_outstanding(dsl_pool_iput_taskq(
			    dmu_objset_pool(zfsvfs->z_os)), 0);
			if (++round > 1 && !unmounting)
				break;
		}
	}

	rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zfsvfs->z_parent->z_sb);
	}

	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	}

	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
	    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
		if (zp->z_sa_hdl)
			zfs_znode_dmu_fini(zp);
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zfsvfs->z_unmounted = B_TRUE;
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zfsvfs, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zfsvfs->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zfsvfs);

	/*
	 * Evict cached data.  Dirty data is written out first if the
	 * dataset is writeable.
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zfsvfs->z_os)) &&
	    !zfs_is_readonly(zfsvfs))
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
	dmu_objset_evict_dbufs(zfsvfs->z_os);

	return (0);
}
#if !defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER) && \
    !defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
	const char *osname = zm->mnt_osname;
	struct inode *root_inode;
	uint64_t recordsize;
	int error = 0;
	zfsvfs_t *zfsvfs = NULL;
	vfs_t *vfs = NULL;

	ASSERT(zm);
	ASSERT(osname);

	error = zfsvfs_parse_options(zm->mnt_data, &vfs);
	if (error)
		return (error);

	error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
	if (error) {
		zfsvfs_vfs_free(vfs);
		goto out;
	}

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL))) {
		zfsvfs_vfs_free(vfs);
		goto out;
	}

	vfs->vfs_data = zfsvfs;
	zfsvfs->z_vfs = vfs;
	zfsvfs->z_sb = sb;
	sb->s_fs_info = zfsvfs;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);

	error = -zpl_bdi_setup(sb, "zfs");
	if (error)
		goto out;

	sb->s_bdi->ra_pages = 0;

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zfsvfs);

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t pval;

		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zfsvfs, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
			goto out;
		acltype_changed_cb(zfsvfs, pval);
		zfsvfs->z_issnap = B_TRUE;
		zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
		zfsvfs->z_snap_defer_time = jiffies;

		mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
		mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
	} else {
		if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
			goto out;
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zfsvfs, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = SET_ERROR(ENOMEM);
		goto out;
	}

	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);

	zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
	if (error) {
		if (zfsvfs != NULL) {
			dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
			zfsvfs_free(zfsvfs);
		}
		/*
		 * make sure we don't have dangling sb->s_fs_info which
		 * zfs_preumount will use.
		 */
		sb->s_fs_info = NULL;
	}

	return (error);
}
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	/* zfsvfs is NULL when zfs_domount fails during mount */
	if (zfsvfs) {
		zfsctl_destroy(sb->s_fs_info);

		/*
		 * Wait for iput_async before entering evict_inodes in
		 * generic_shutdown_super.  The reason we must finish before
		 * evict_inodes is when lazytime is on, or when zfs_purgedir
		 * calls zfs_zget, iput would bump i_count from 0 to 1.  This
		 * would race with the i_count check in evict_inodes.  This
		 * means it could destroy the inode while we are still using
		 * it.
		 *
		 * We wait for two passes.  xattr directories in the first
		 * pass may add xattr entries in zfs_purgedir, so in the
		 * second pass we wait for them.  We don't use taskq_wait here
		 * because it is a pool wide taskq.  Other mounted filesystems
		 * can constantly do iput_async and there's no guarantee when
		 * the taskq will be empty.
		 */
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zfsvfs->z_os)), 0);
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zfsvfs->z_os)), 0);
	}
}
/*
 * Called once all other unmount-related tear down has occurred.
 * It is our responsibility to release any remaining infrastructure.
 */
int
zfs_umount(struct super_block *sb)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	objset_t *os;

	if (zfsvfs->z_arc_prune != NULL)
		arc_remove_prune_callback(zfsvfs->z_arc_prune);
	VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
	os = zfsvfs->z_os;
	zpl_bdi_destroy(sb);

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zfsvfs.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_disown(os, B_TRUE, zfsvfs);
	}

	zfsvfs_free(zfsvfs);
	return (0);
}
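
/*
 * Handle a remount request: re-parse the mount options, re-register the
 * property callbacks, and refuse a read-write remount of snapshots or of
 * pools that are not writeable.
 */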
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	vfs_t *vfsp;
	boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
	int error;

	if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
	    !(*flags & MS_RDONLY)) {
		*flags |= MS_RDONLY;
		return (EROFS);
	}

	error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
	if (error)
		return (error);

	zfs_unregister_callbacks(zfsvfs);
	zfsvfs_vfs_free(zfsvfs->z_vfs);

	vfsp->vfs_data = zfsvfs;
	zfsvfs->z_vfs = vfsp;
	if (!issnap)
		(void) zfs_register_callbacks(vfsp);

	return (error);
}
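
/*
 * Convert an NFS file handle (fid_t) back into an inode.  Short fids
 * reference objects in this filesystem; long fids reference entries under
 * the .zfs/snapshot control directory.
 */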
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	znode_t *zp;
	uint64_t object = 0;
	uint64_t fid_gen = 0;
	uint64_t gen_mask;
	uint64_t zp_gen;
	int i, err;

	*ipp = NULL;

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		return (SET_ERROR(EINVAL));
	}

	/* LONG_FID_LEN means snapdirs */
	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
			dprintf("snapdir fid: objsetid (%llu) != "
			    "ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
			    objsetid, ZFSCTL_INO_SNAPDIRS, object);

			return (SET_ERROR(EINVAL));
		}

		if (fid_gen > 1 || setgen != 0) {
			dprintf("snapdir fid: fid_gen (%llu) and setgen "
			    "(%llu)\n", fid_gen, setgen);
			return (SET_ERROR(EINVAL));
		}

		return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
	}

	ZFS_ENTER(zfsvfs);
	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zfsvfs->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);
		} else {
			igrab(*ipp);
		}
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zfsvfs, object, &zp))) {
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/* Don't export xattr stuff */
	if (zp->z_pflags & ZFS_XATTR) {
		iput(ZTOI(zp));
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
	}

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if (zp_gen == 0)
		zp_gen = 1;
	if ((fid_gen == 0) && (zfsvfs->z_root == object))
		fid_gen = zp_gen;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
		    fid_gen);
		iput(ZTOI(zp));
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
	}

	*ipp = ZTOI(zp);
	if (*ipp)
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Block out VFS ops and close zfsvfs_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.  We leave ownership of the underlying
 * dataset and objset intact so that they can be atomically handed off during
 * a subsequent rollback or recv operation and the resume thereafter.
 */
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
	int error;

	if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
		return (error);

	return (0);
}

/*
 * Rebuild SA and release VOPs.  Note that ownership of the underlying dataset
 * is an invariant across any of the operations that can be performed while the
 * filesystem was suspended.  Whether it succeeded or failed, the preconditions
 * are the same: the relevant objset and associated dataset are owned by
 * zfsvfs, held, and long held on entry.
 */
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
	int err, err2;
	znode_t *zp;
	objset_t *os;

	ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));

	/*
	 * We already own this, so just update the objset_t, as the one we
	 * had before may have been evicted.
	 */
	VERIFY3P(ds->ds_owner, ==, zfsvfs);
	VERIFY(dsl_dataset_long_held(ds));
	VERIFY0(dmu_objset_from_ds(ds, &os));

	err = zfsvfs_init(zfsvfs, os);
	if (err != 0)
		goto bail;

	VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);

	zfs_set_fuid_feature(zfsvfs);
	zfsvfs->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp;
	    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
	rrm_exit(&zfsvfs->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zfsvfs->z_os)
			(void) zfs_umount(zfsvfs->z_sb);
	}
	return (err);
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
	int error;
	objset_t *os = zfsvfs->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zfsvfs->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zfsvfs->z_os)))
		return (SET_ERROR(ENOTSUP));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);

	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
	    "from %llu to %llu", zfsvfs->z_version, newvers);

	dmu_tx_commit(tx);

	zfsvfs->z_version = newvers;
	os->os_version = newvers;

	zfs_set_fuid_feature(zfsvfs);

	return (0);
}
/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	uint64_t *cached_copy = NULL;

	/*
	 * Figure out where in the objset_t the cached copy would live, if it
	 * is available for the requested property.
	 */
	if (os != NULL) {
		switch (prop) {
		case ZFS_PROP_VERSION:
			cached_copy = &os->os_version;
			break;
		case ZFS_PROP_NORMALIZE:
			cached_copy = &os->os_normalization;
			break;
		case ZFS_PROP_UTF8ONLY:
			cached_copy = &os->os_utf8only;
			break;
		case ZFS_PROP_CASE:
			cached_copy = &os->os_casesensitivity;
			break;
		default:
			break;
		}
	}
	if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
		*value = *cached_copy;
		return (0);
	}

	/*
	 * If the property wasn't cached, look up the file system's value for
	 * the property.  For the version property, we look up a slightly
	 * different string.
	 */
	const char *pname;
	int error = ENOENT;
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
	else
		pname = zfs_prop_to_name(prop);

	if (os != NULL) {
		ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
		error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
	}

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		case ZFS_PROP_ACLTYPE:
			*value = ZFS_ACLTYPE_OFF;
			break;
		default:
			return (error);
		}
		error = 0;
	}

	/*
	 * If one of the methods for getting the property value above worked,
	 * copy it into the objset_t's cache.
	 */
	if (error == 0 && cached_copy != NULL) {
		*cached_copy = *value;
	}

	return (error);
}
/*
 * Return true if the corresponding vfs's unmounted flag is set.
 * Otherwise return false.
 * If this function returns true we know VFS unmount has been initiated.
 */
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
	zfsvfs_t *zfvp;
	boolean_t unmounted = B_FALSE;

	ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);

	mutex_enter(&os->os_user_ptr_lock);
	zfvp = dmu_objset_get_user(os);
	if (zfvp != NULL && zfvp->z_unmounted)
		unmounted = B_TRUE;
	mutex_exit(&os->os_user_ptr_lock);

	return (unmounted);
}

void
zfs_init(void)
{
	zfsctl_init();
	zfs_znode_init();
	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
	register_filesystem(&zpl_fs_type);
}

void
zfs_fini(void)
{
	/*
	 * we don't use outstanding because zpl_posix_acl_free might add more.
	 */
	taskq_wait(system_delay_taskq);
	taskq_wait(system_taskq);
	unregister_filesystem(&zpl_fs_type);
	zfs_znode_fini();
	zfsctl_fini();
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_userspace_one);
EXPORT_SYMBOL(zfs_userspace_many);
EXPORT_SYMBOL(zfs_set_userquota);
EXPORT_SYMBOL(zfs_id_overblockquota);
EXPORT_SYMBOL(zfs_id_overobjquota);
EXPORT_SYMBOL(zfs_id_overquota);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif