/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/mkdev.h>
#include <sys/modctl.h>
#include <sys/refstr.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/bootconf.h>
#include <sys/sunddi.h>
#include <sys/dnlc.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include <sys/zpl.h>
#include "zfs_comutil.h"
static const match_table_t zpl_tokens = {
	{ TOKEN_RO,		MNTOPT_RO },
	{ TOKEN_RW,		MNTOPT_RW },
	{ TOKEN_SETUID,		MNTOPT_SETUID },
	{ TOKEN_NOSETUID,	MNTOPT_NOSETUID },
	{ TOKEN_EXEC,		MNTOPT_EXEC },
	{ TOKEN_NOEXEC,		MNTOPT_NOEXEC },
	{ TOKEN_DEVICES,	MNTOPT_DEVICES },
	{ TOKEN_NODEVICES,	MNTOPT_NODEVICES },
	{ TOKEN_DIRXATTR,	MNTOPT_DIRXATTR },
	{ TOKEN_SAXATTR,	MNTOPT_SAXATTR },
	{ TOKEN_XATTR,		MNTOPT_XATTR },
	{ TOKEN_NOXATTR,	MNTOPT_NOXATTR },
	{ TOKEN_ATIME,		MNTOPT_ATIME },
	{ TOKEN_NOATIME,	MNTOPT_NOATIME },
	{ TOKEN_RELATIME,	MNTOPT_RELATIME },
	{ TOKEN_NORELATIME,	MNTOPT_NORELATIME },
	{ TOKEN_NBMAND,		MNTOPT_NBMAND },
	{ TOKEN_NONBMAND,	MNTOPT_NONBMAND },
	{ TOKEN_MNTPOINT,	MNTOPT_MNTPOINT "=%s" },
	{ TOKEN_LAST,		NULL },
};
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
	if (vfsp != NULL) {
		if (vfsp->vfs_mntpoint != NULL)
			strfree(vfsp->vfs_mntpoint);

		kmem_free(vfsp, sizeof (vfs_t));
	}
}
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
	switch (token) {
	case TOKEN_RO:
		vfsp->vfs_readonly = B_TRUE;
		vfsp->vfs_do_readonly = B_TRUE;
		break;
	case TOKEN_RW:
		vfsp->vfs_readonly = B_FALSE;
		vfsp->vfs_do_readonly = B_TRUE;
		break;
	case TOKEN_SETUID:
		vfsp->vfs_setuid = B_TRUE;
		vfsp->vfs_do_setuid = B_TRUE;
		break;
	case TOKEN_NOSETUID:
		vfsp->vfs_setuid = B_FALSE;
		vfsp->vfs_do_setuid = B_TRUE;
		break;
	case TOKEN_EXEC:
		vfsp->vfs_exec = B_TRUE;
		vfsp->vfs_do_exec = B_TRUE;
		break;
	case TOKEN_NOEXEC:
		vfsp->vfs_exec = B_FALSE;
		vfsp->vfs_do_exec = B_TRUE;
		break;
	case TOKEN_DEVICES:
		vfsp->vfs_devices = B_TRUE;
		vfsp->vfs_do_devices = B_TRUE;
		break;
	case TOKEN_NODEVICES:
		vfsp->vfs_devices = B_FALSE;
		vfsp->vfs_do_devices = B_TRUE;
		break;
	case TOKEN_DIRXATTR:
		vfsp->vfs_xattr = ZFS_XATTR_DIR;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_SAXATTR:
		vfsp->vfs_xattr = ZFS_XATTR_SA;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_XATTR:
		vfsp->vfs_xattr = ZFS_XATTR_DIR;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_NOXATTR:
		vfsp->vfs_xattr = ZFS_XATTR_OFF;
		vfsp->vfs_do_xattr = B_TRUE;
		break;
	case TOKEN_ATIME:
		vfsp->vfs_atime = B_TRUE;
		vfsp->vfs_do_atime = B_TRUE;
		break;
	case TOKEN_NOATIME:
		vfsp->vfs_atime = B_FALSE;
		vfsp->vfs_do_atime = B_TRUE;
		break;
	case TOKEN_RELATIME:
		vfsp->vfs_relatime = B_TRUE;
		vfsp->vfs_do_relatime = B_TRUE;
		break;
	case TOKEN_NORELATIME:
		vfsp->vfs_relatime = B_FALSE;
		vfsp->vfs_do_relatime = B_TRUE;
		break;
	case TOKEN_NBMAND:
		vfsp->vfs_nbmand = B_TRUE;
		vfsp->vfs_do_nbmand = B_TRUE;
		break;
	case TOKEN_NONBMAND:
		vfsp->vfs_nbmand = B_FALSE;
		vfsp->vfs_do_nbmand = B_TRUE;
		break;
	case TOKEN_MNTPOINT:
		vfsp->vfs_mntpoint = match_strdup(&args[0]);
		if (vfsp->vfs_mntpoint == NULL)
			return (SET_ERROR(ENOMEM));

		break;
	default:
		break;
	}

	return (0);
}
/*
 * Parse the raw mntopts and return a vfs_t describing the options.
 */
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
	vfs_t *tmp_vfsp;
	int error;

	tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);

	if (mntopts != NULL) {
		substring_t args[MAX_OPT_ARGS];
		char *tmp_mntopts, *p, *t;
		int token;

		tmp_mntopts = t = strdup(mntopts);
		if (tmp_mntopts == NULL) {
			zfsvfs_vfs_free(tmp_vfsp);
			return (SET_ERROR(ENOMEM));
		}

		while ((p = strsep(&t, ",")) != NULL) {
			if (!*p)
				continue;

			args[0].to = args[0].from = NULL;
			token = match_token(p, zpl_tokens, args);
			error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
			if (error) {
				strfree(tmp_mntopts);
				zfsvfs_vfs_free(tmp_vfsp);
				return (error);
			}
		}

		strfree(tmp_mntopts);
	}

	*vfsp = tmp_vfsp;

	return (0);
}
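/*
 * Illustrative usage sketch -- not part of the original file.  A caller
 * hands zfsvfs_parse_options() a comma-separated option string and gets
 * back a heap-allocated vfs_t; "ro,noatime" below is a made-up example.
 */
static int
zfsvfs_parse_options_example(void)
{
	char opts[] = "ro,noatime";
	vfs_t *vfsp = NULL;
	int error;

	error = zfsvfs_parse_options(opts, &vfsp);
	if (error)
		return (error);

	/* Each recognized option sets both its value and its do_ flag. */
	ASSERT(vfsp->vfs_do_readonly && vfsp->vfs_readonly);
	ASSERT(vfsp->vfs_do_atime && !vfsp->vfs_atime);

	zfsvfs_vfs_free(vfsp);
	return (0);
}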
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
	return (!!(zfsvfs->z_sb->s_flags & MS_RDONLY));
}
/*ARGSUSED*/
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	 */
	if (!wait)
		return (0);

	if (zfsvfs != NULL) {
		/*
		 * Sync a specific filesystem.
		 */
		dsl_pool_t *dp;

		ZFS_ENTER(zfsvfs);
		dp = dmu_objset_pool(zfsvfs->z_os);

		/*
		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		 */
		if (spa_suspended(dp->dp_spa)) {
			ZFS_EXIT(zfsvfs);
			return (0);
		}

		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, 0);

		ZFS_EXIT(zfsvfs);
	} else {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		spa_sync_allpools();
	}

	return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_atime = newval;
}

static void
relatime_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_relatime = newval;
}

static void
xattr_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;

	if (newval == ZFS_XATTR_OFF) {
		zfsvfs->z_flags &= ~ZSB_XATTR;
	} else {
		zfsvfs->z_flags |= ZSB_XATTR;

		if (newval == ZFS_XATTR_SA)
			zfsvfs->z_xattr_sa = B_TRUE;
		else
			zfsvfs->z_xattr_sa = B_FALSE;
	}
}

static void
acltype_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;

	switch (newval) {
	case ZFS_ACLTYPE_OFF:
		zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
		zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
		break;
	case ZFS_ACLTYPE_POSIXACL:
#ifdef CONFIG_FS_POSIX_ACL
		zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIXACL;
		zfsvfs->z_sb->s_flags |= MS_POSIXACL;
#else
		zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
		zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
		break;
	default:
		break;
	}
}

static void
blksz_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;
	ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
	ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
	ASSERT(ISP2(newval));

	zfsvfs->z_max_blksz = newval;
}

static void
readonly_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_RDONLY;
	else
		sb->s_flags &= ~MS_RDONLY;
}

static void
devices_changed_cb(void *arg, uint64_t newval)
{
}

static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}

static void
exec_changed_cb(void *arg, uint64_t newval)
{
}

static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_MANDLOCK;
	else
		sb->s_flags &= ~MS_MANDLOCK;
}

static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_show_ctldir = newval;
}

static void
vscan_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_vscan = newval;
}

static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
	((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
	struct dsl_dataset *ds = NULL;
	objset_t *os = NULL;
	zfsvfs_t *zfsvfs = NULL;
	int error = 0;

	ASSERT(vfsp);
	zfsvfs = vfsp->vfs_data;
	ASSERT(zfsvfs);
	os = zfsvfs->z_os;

	/*
	 * The act of registering our callbacks will destroy any mount
	 * options we may have.  In order to enable temporary overrides
	 * of mount options, we stash away the current values and
	 * restore them after we register the callbacks.
	 */
	if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
		vfsp->vfs_do_readonly = B_TRUE;
		vfsp->vfs_readonly = B_TRUE;
	}

	/*
	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go
	 * overboard...
	 */
	ds = dmu_objset_ds(os);
	dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
	error = dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
	    zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
	dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
	if (error)
		goto unregister;

	/*
	 * Invoke our callbacks to restore temporary mount options.
	 */
	if (vfsp->vfs_do_readonly)
		readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
	if (vfsp->vfs_do_setuid)
		setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
	if (vfsp->vfs_do_exec)
		exec_changed_cb(zfsvfs, vfsp->vfs_exec);
	if (vfsp->vfs_do_devices)
		devices_changed_cb(zfsvfs, vfsp->vfs_devices);
	if (vfsp->vfs_do_xattr)
		xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
	if (vfsp->vfs_do_atime)
		atime_changed_cb(zfsvfs, vfsp->vfs_atime);
	if (vfsp->vfs_do_relatime)
		relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
	if (vfsp->vfs_do_nbmand)
		nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);

	return (0);

unregister:
	dsl_prop_unregister_all(ds, zfsvfs);
	return (error);
}
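/*
 * Illustrative note -- not part of the original file.  The
 * "error = error ? error : dsl_prop_register(...)" chain above keeps the
 * first failure and, because the ternary never evaluates its third operand
 * once error is nonzero, skips every later registration:
 *
 *	error = step_one();			// may fail
 *	error = error ? error : step_two();	// runs only if step_one() == 0
 */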
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (SET_ERROR(ENOENT));

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids
	 */
	if (data == NULL)
		return (SET_ERROR(EEXIST));

	if (bonustype == DMU_OT_ZNODE) {
		znode_phys_t *znp = data;
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
	} else {
		int hdrsize;
		sa_hdr_phys_t *sap = data;
		sa_hdr_phys_t sa = *sap;
		boolean_t swap = B_FALSE;

		ASSERT(bonustype == DMU_OT_SA);

		if (sa.sa_magic == 0) {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
			 * in yet.
			 */
			*userp = 0;
			*groupp = 0;
			return (0);
		}

		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
			sa.sa_magic = SA_MAGIC;
			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
			swap = B_TRUE;
		} else {
			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
		}

		hdrsize = sa_hdrsize(&sa);
		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_UID_OFFSET));
		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_GID_OFFSET));
		if (swap) {
			*userp = BSWAP_64(*userp);
			*groupp = BSWAP_64(*groupp);
		}
	}
	return (0);
}
static void
fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
	uint64_t fuid;
	const char *domain;

	fuid = zfs_strtonum(fuidstr, NULL);

	domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid));
	if (domain)
		(void) strlcpy(domainbuf, domain, buflen);
	else
		domainbuf[0] = '\0';
	*ridp = FUID_RID(fuid);
}
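/*
 * Illustrative sketch -- not part of the original file.  A FUID packs a
 * domain-table index in its upper bits and a 32-bit RID in its lower bits;
 * FUID_ENCODE/FUID_INDEX/FUID_RID come from sys/zfs_fuid.h.
 */
static void
zfs_fuid_split_example(void)
{
	uint64_t fuid = FUID_ENCODE(3, 1001);	/* index 3, rid 1001 */

	ASSERT3U(FUID_INDEX(fuid), ==, 3);
	ASSERT3U(FUID_RID(fuid), ==, 1001);
}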
static uint64_t
zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
{
	switch (type) {
	case ZFS_PROP_USERUSED:
	case ZFS_PROP_USEROBJUSED:
		return (DMU_USERUSED_OBJECT);
	case ZFS_PROP_GROUPUSED:
	case ZFS_PROP_GROUPOBJUSED:
		return (DMU_GROUPUSED_OBJECT);
	case ZFS_PROP_USERQUOTA:
		return (zfsvfs->z_userquota_obj);
	case ZFS_PROP_GROUPQUOTA:
		return (zfsvfs->z_groupquota_obj);
	case ZFS_PROP_USEROBJQUOTA:
		return (zfsvfs->z_userobjquota_obj);
	case ZFS_PROP_GROUPOBJQUOTA:
		return (zfsvfs->z_groupobjquota_obj);
	default:
		return (ZFS_NO_OBJECT);
	}
}
int
zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;
	int offset = 0;

	if (!dmu_objset_userspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) &&
	    !dmu_objset_userobjspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zfsvfs, type);
	if (obj == ZFS_NO_OBJECT) {
		*bufsizep = 0;
		return (0);
	}

	if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED)
		offset = DMU_OBJACCT_PREFIX_LEN;

	for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		/*
		 * skip object quota (with zap name prefix DMU_OBJACCT_PREFIX)
		 * when dealing with block quota and vice versa.
		 */
		if ((offset > 0) != (strncmp(za.za_name, DMU_OBJACCT_PREFIX,
		    DMU_OBJACCT_PREFIX_LEN) == 0))
			continue;

		fuidstr_to_sid(zfsvfs, za.za_name + offset,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
/*
 * buf must be big enough (eg, 32 bytes)
 */
static int
id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
		if (domainid == -1)
			return (SET_ERROR(ENOENT));
	}
	fuid = FUID_ENCODE(domainid, rid);
	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	return (0);
}
int
zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	int offset = 0;
	int err;
	uint64_t obj;

	*valp = 0;

	if (!dmu_objset_userspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) &&
	    !dmu_objset_userobjspace_present(zfsvfs->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zfsvfs, type);
	if (obj == ZFS_NO_OBJECT)
		return (0);

	if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED) {
		strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN);
		offset = DMU_OBJACCT_PREFIX_LEN;
	}

	err = id_to_fuidstr(zfsvfs, domain, rid, buf + offset, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
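/*
 * Illustrative usage sketch -- not part of the original file.  Looking up
 * the space charged to a local (domain-less) uid; an empty domain string
 * denotes a plain POSIX id rather than a Windows SID.
 */
static int
zfs_userused_example(zfsvfs_t *zfsvfs, uint64_t uid, uint64_t *space)
{
	return (zfs_userspace_one(zfsvfs, ZFS_PROP_USERUSED, "", uid, space));
}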
int
zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
		return (SET_ERROR(ENOTSUP));

	switch (type) {
	case ZFS_PROP_USERQUOTA:
		objp = &zfsvfs->z_userquota_obj;
		break;
	case ZFS_PROP_GROUPQUOTA:
		objp = &zfsvfs->z_groupquota_obj;
		break;
	case ZFS_PROP_USEROBJQUOTA:
		objp = &zfsvfs->z_userobjquota_obj;
		break;
	case ZFS_PROP_GROUPOBJQUOTA:
		objp = &zfsvfs->z_groupobjquota_obj;
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zfsvfs->z_fuid_dirty;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zfsvfs->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zfsvfs->z_lock);

	if (quota == 0) {
		err = zap_remove(zfsvfs->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);
	dmu_tx_commit(tx);
	return (err);
}
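/*
 * Illustrative sketch -- not part of the original file.  The function above
 * follows the standard DMU transaction idiom: create, declare holds, assign,
 * mutate, commit -- aborting only a transaction that was never assigned.
 * "example-key" and the ZAP object here are stand-ins.
 */
static int
zfs_tx_pattern_example(objset_t *os, uint64_t zapobj)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	uint64_t value = 1;
	int err;

	dmu_tx_hold_zap(tx, zapobj, B_TRUE, NULL);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = zap_update(os, zapobj, "example-key", 8, 1, &value, tx);
	dmu_tx_commit(tx);
	return (err);
}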
boolean_t
zfs_fuid_overobjquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
{
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	uint64_t used, quota, usedobj, quotaobj;
	int err;

	if (!dmu_objset_userobjspace_present(zfsvfs->z_os)) {
		if (dmu_objset_userobjspace_upgradable(zfsvfs->z_os))
			dmu_objset_userobjspace_upgrade(zfsvfs->z_os);
		return (B_FALSE);
	}

	usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
	quotaobj = isgroup ? zfsvfs->z_groupobjquota_obj :
	    zfsvfs->z_userobjquota_obj;
	if (quotaobj == 0 || zfsvfs->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	(void) sprintf(buf, DMU_OBJACCT_PREFIX "%llx", (longlong_t)fuid);
	err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
boolean_t
zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
{
	char buf[20];
	uint64_t used, quota, usedobj, quotaobj;
	int err;

	usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
	quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;

	if (quotaobj == 0 || zfsvfs->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
boolean_t
zfs_owner_overquota(zfsvfs_t *zfsvfs, znode_t *zp, boolean_t isgroup)
{
	uint64_t fuid;
	uint64_t quotaobj;
	struct inode *ip = ZTOI(zp);

	quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;

	fuid = isgroup ? KGID_TO_SGID(ip->i_gid) : KUID_TO_SUID(ip->i_uid);

	if (quotaobj == 0 || zfsvfs->z_replay)
		return (B_FALSE);

	return (zfs_fuid_overquota(zfsvfs, isgroup, fuid));
}
/*
 * Associate this zfsvfs with the given objset, which must be owned.
 * This will cache a bunch of on-disk state from the objset in the
 * zfsvfs.
 */
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
	int error;
	uint64_t val;

	zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zfsvfs->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
	if (error != 0)
		return (error);
	if (zfsvfs->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool. Pool must be upgraded to mount "
		    "this file system.\n", (u_longlong_t)zfsvfs->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		return (SET_ERROR(ENOTSUP));
	}
	error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
	if (error != 0)
		return (error);
	zfsvfs->z_norm = (int)val;

	error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
	if (error != 0)
		return (error);
	zfsvfs->z_utf8 = (val != 0);

	error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
	if (error != 0)
		return (error);
	zfsvfs->z_case = (uint_t)val;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
		return (error);
	zfsvfs->z_acl_type = (uint_t)val;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
	    zfsvfs->z_case == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);

	uint64_t sa_obj = 0;
	if (zfsvfs->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error != 0)
			return (error);

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
		if ((error == 0) && (val == ZFS_XATTR_SA))
			zfsvfs->z_xattr_sa = B_TRUE;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);
	if (error != 0)
		return (error);

	if (zfsvfs->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zfsvfs->z_root);
	if (error != 0)
		return (error);
	ASSERT(zfsvfs->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zfsvfs->z_unlinkedobj);
	if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zfsvfs->z_userquota_obj);
	if (error == ENOENT)
		zfsvfs->z_userquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zfsvfs->z_groupquota_obj);
	if (error == ENOENT)
		zfsvfs->z_groupquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
	    8, 1, &zfsvfs->z_userobjquota_obj);
	if (error == ENOENT)
		zfsvfs->z_userobjquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
	    8, 1, &zfsvfs->z_groupobjquota_obj);
	if (error == ENOENT)
		zfsvfs->z_groupobjquota_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zfsvfs->z_fuid_obj);
	if (error == ENOENT)
		zfsvfs->z_fuid_obj = 0;
	else if (error != 0)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zfsvfs->z_shares_dir);
	if (error == ENOENT)
		zfsvfs->z_shares_dir = 0;
	else if (error != 0)
		return (error);

	return (0);
}
int
zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
{
	objset_t *os;
	zfsvfs_t *zfsvfs;
	int error;

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, B_TRUE,
	    zfsvfs, &os);
	if (error) {
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
		return (error);
	}

	zfsvfs->z_vfs = NULL;
	zfsvfs->z_sb = NULL;
	zfsvfs->z_parent = zfsvfs;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zfsvfs->z_teardown_lock, B_FALSE);
	rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
	    ZFS_OBJ_MTX_MAX);
	zfsvfs->z_hold_size = size;
	zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
	    KM_SLEEP);
	zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (int i = 0; i != size; i++) {
		avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
	}

	error = zfsvfs_init(zfsvfs, os);
	if (error != 0) {
		dmu_objset_disown(os, B_TRUE, zfsvfs);
		*zfvp = NULL;
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
		return (error);
	}

	*zfvp = zfsvfs;
	return (0);
}
*zfsvfs
, boolean_t mounting
)
1099 error
= zfs_register_callbacks(zfsvfs
->z_vfs
);
1103 zfsvfs
->z_log
= zil_open(zfsvfs
->z_os
, zfs_get_data
);
1106 * If we are not mounting (ie: online recv), then we don't
1107 * have to worry about replaying the log as we blocked all
1108 * operations out since we closed the ZIL.
1114 * During replay we remove the read only flag to
1115 * allow replays to succeed.
1117 readonly
= zfs_is_readonly(zfsvfs
);
1119 readonly_changed_cb(zfsvfs
, B_FALSE
);
1121 zfs_unlinked_drain(zfsvfs
);
1124 * Parse and replay the intent log.
1126 * Because of ziltest, this must be done after
1127 * zfs_unlinked_drain(). (Further note: ziltest
1128 * doesn't use readonly mounts, where
1129 * zfs_unlinked_drain() isn't called.) This is because
1130 * ziltest causes spa_sync() to think it's committed,
1131 * but actually it is not, so the intent log contains
1132 * many txg's worth of changes.
1134 * In particular, if object N is in the unlinked set in
1135 * the last txg to actually sync, then it could be
1136 * actually freed in a later txg and then reallocated
1137 * in a yet later txg. This would write a "create
1138 * object N" record to the intent log. Normally, this
1139 * would be fine because the spa_sync() would have
1140 * written out the fact that object N is free, before
1141 * we could write the "create object N" intent log
1144 * But when we are in ziltest mode, we advance the "open
1145 * txg" without actually spa_sync()-ing the changes to
1146 * disk. So we would see that object N is still
1147 * allocated and in the unlinked set, and there is an
1148 * intent log record saying to allocate it.
1150 if (spa_writeable(dmu_objset_spa(zfsvfs
->z_os
))) {
1151 if (zil_replay_disable
) {
1152 zil_destroy(zfsvfs
->z_log
, B_FALSE
);
1154 zfsvfs
->z_replay
= B_TRUE
;
1155 zil_replay(zfsvfs
->z_os
, zfsvfs
,
1157 zfsvfs
->z_replay
= B_FALSE
;
1161 /* restore readonly bit */
1163 readonly_changed_cb(zfsvfs
, B_TRUE
);
1167 * Set the objset user_ptr to track its zfsvfs.
1169 mutex_enter(&zfsvfs
->z_os
->os_user_ptr_lock
);
1170 dmu_objset_set_user(zfsvfs
->z_os
, zfsvfs
);
1171 mutex_exit(&zfsvfs
->z_os
->os_user_ptr_lock
);
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
	int i, size = zfsvfs->z_hold_size;

	zfs_fuid_destroy(zfsvfs);

	mutex_destroy(&zfsvfs->z_znodes_lock);
	mutex_destroy(&zfsvfs->z_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rrm_destroy(&zfsvfs->z_teardown_lock);
	rw_destroy(&zfsvfs->z_teardown_inactive_lock);
	rw_destroy(&zfsvfs->z_fuid_lock);
	for (i = 0; i != size; i++) {
		avl_destroy(&zfsvfs->z_hold_trees[i]);
		mutex_destroy(&zfsvfs->z_hold_locks[i]);
	}
	vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
	zfsvfs_vfs_free(zfsvfs->z_vfs);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
	objset_t *os = zfsvfs->z_os;

	if (!dmu_objset_is_snapshot(os))
		dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
#ifdef HAVE_MLSLABEL
/*
 * Check that the hex label string is appropriate for the dataset being
 * mounted into the global_zone proper.
 *
 * Return an error if the hex label string is not default or
 * admin_low/admin_high.  For admin_low labels, the corresponding
 * dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (SET_ERROR(EACCES));
		return (rdonly ? 0 : EACCES);
	}
	return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
	zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint64_t fsid;
	uint32_t bshift;

	ZFS_ENTER(zfsvfs);

	dmu_objset_space(zfsvfs->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
	/*
	 * The underlying storage pool actually uses multiple block
	 * sizes.  Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size.  Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably.  Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
	 */
	statp->f_frsize = zfsvfs->z_max_blksz;
	statp->f_bsize = zfsvfs->z_max_blksz;
	bshift = fls(statp->f_bsize) - 1;

	/*
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "preferred" size.
	 */

	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata.  ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects available
	 * and the number of blocks (each object will take at least a block).
	 */
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = (uint32_t)fsid;
	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	/*
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	 */
	bzero(statp->f_spare, sizeof (statp->f_spare));

	ZFS_EXIT(zfsvfs);
	return (0);
}
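/*
 * Illustrative note -- not part of the original file.  With the default
 * 128K maximum block size, fls(131072) - 1 == 17, so the shifts above
 * convert byte counts into 128K "blocks", e.g.:
 *
 *	statp->f_bfree = availbytes >> 17;	// bytes -> 128K blocks
 */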
int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
	znode_t *rootzp;
	int error;

	ZFS_ENTER(zfsvfs);

	error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
	if (error == 0)
		*ipp = ZTOI(rootzp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#ifdef HAVE_D_PRUNE_ALIASES
/*
 * Linux kernels older than 3.1 do not support a per-filesystem shrinker.
 * To accommodate this we must improvise and manually walk the list of znodes
 * attempting to prune dentries in order to be able to drop the inodes.
 *
 * To avoid scanning the same znodes multiple times they are always rotated
 * to the end of the z_all_znodes list.  New znodes are inserted at the
 * end of the list so we're always scanning the oldest znodes first.
 */
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
	znode_t **zp_array, *zp;
	int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
	int objects = 0;
	int i = 0, j = 0;

	zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);

	mutex_enter(&zfsvfs->z_znodes_lock);
	while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {

		if ((i++ > nr_to_scan) || (j >= max_array))
			break;

		ASSERT(list_link_active(&zp->z_link_node));
		list_remove(&zfsvfs->z_all_znodes, zp);
		list_insert_tail(&zfsvfs->z_all_znodes, zp);

		/* Skip active znodes and .zfs entries */
		if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
			continue;

		if (igrab(ZTOI(zp)) == NULL)
			continue;

		zp_array[j] = zp;
		j++;
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	for (i = 0; i < j; i++) {
		zp = zp_array[i];

		ASSERT3P(zp, !=, NULL);
		d_prune_aliases(ZTOI(zp));

		if (atomic_read(&ZTOI(zp)->i_count) == 1)
			objects++;

		iput(ZTOI(zp));
	}

	kmem_free(zp_array, max_array * sizeof (znode_t *));

	return (objects);
}
#endif /* HAVE_D_PRUNE_ALIASES */
/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 */
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

	ZFS_ENTER(zfsvfs);

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
	defined(SHRINK_CONTROL_HAS_NID) && \
	defined(SHRINKER_NUMA_AWARE)
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		*objects = 0;
		for_each_online_node(sc.nid) {
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
		}
	} else {
			*objects = (*shrinker->scan_objects)(shrinker, &sc);
	}

#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define	D_PRUNE_ALIASES_IS_DEFAULT
	*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif

#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef	D_PRUNE_ALIASES_IS_DEFAULT
	/*
	 * Fall back to zfs_prune_aliases if the kernel's per-superblock
	 * shrinker couldn't free anything, possibly due to the inodes being
	 * allocated in a different memcg.
	 */
	if (*objects == 0)
		*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif

	ZFS_EXIT(zfsvfs);

	dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
/*
 * Teardown the zfsvfs_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
	znode_t	*zp;

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfsvfs_t have been handled only then can it be safely destroyed.
	 */
	if (zfsvfs->z_os) {
		/*
		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		 */
		int round = 0;
		while (zfsvfs->z_nr_znodes > 0) {
			taskq_wait_outstanding(dsl_pool_iput_taskq(
			    dmu_objset_pool(zfsvfs->z_os)), 0);
			if (++round > 1 && !unmounting)
				break;
		}
	}

	rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zfsvfs->z_parent->z_sb);
	}

	/*
	 * Close the zil.  NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	}

	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
	    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
		if (zp->z_sa_hdl)
			zfs_znode_dmu_fini(zp);
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zfsvfs->z_unmounted = B_TRUE;
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zfsvfs, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zfsvfs->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zfsvfs);

	/*
	 * Evict cached data.
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zfsvfs->z_os)) &&
	    !zfs_is_readonly(zfsvfs))
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
	dmu_objset_evict_dbufs(zfsvfs->z_os);

	return (0);
}
#if !defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER) && \
    !defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
	const char *osname = zm->mnt_osname;
	struct inode *root_inode;
	uint64_t recordsize;
	int error = 0;
	zfsvfs_t *zfsvfs;

	ASSERT(zm);
	ASSERT(osname);

	error = zfsvfs_create(osname, &zfsvfs);
	if (error)
		return (error);

	error = zfsvfs_parse_options(zm->mnt_data, &zfsvfs->z_vfs);
	if (error)
		goto out;

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zfsvfs->z_vfs->vfs_data = zfsvfs;

	sb->s_fs_info = zfsvfs;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);

	error = -zpl_bdi_setup(sb, "zfs");
	if (error)
		goto out;

	sb->s_bdi->ra_pages = 0;

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zfsvfs);

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t pval;

		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zfsvfs, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
			goto out;
		acltype_changed_cb(zfsvfs, pval);
		zfsvfs->z_issnap = B_TRUE;
		zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
		zfsvfs->z_snap_defer_time = jiffies;

		mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
		mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
	} else {
		if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
			goto out;
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zfsvfs, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = SET_ERROR(ENOMEM);
		goto out;
	}

	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);

	zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
	if (error) {
		dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
		zfsvfs_free(zfsvfs);
		/*
		 * make sure we don't have dangling sb->s_fs_info which
		 * zfs_preumount will use.
		 */
		sb->s_fs_info = NULL;
	}

	return (error);
}
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	/* zfsvfs is NULL when zfs_domount fails during mount */
	if (zfsvfs) {
		zfsctl_destroy(sb->s_fs_info);
		/*
		 * Wait for iput_async before entering evict_inodes in
		 * generic_shutdown_super. The reason we must finish before
		 * evict_inodes is when lazytime is on, or when zfs_purgedir
		 * calls zfs_zget, iput would bump i_count from 0 to 1. This
		 * would race with the i_count check in evict_inodes. This means
		 * it could destroy the inode while we are still using it.
		 *
		 * We wait for two passes. xattr directories in the first pass
		 * may add xattr entries in zfs_purgedir, so in the second pass
		 * we wait for them. We don't use taskq_wait here because it is
		 * a pool wide taskq. Other mounted filesystems can constantly
		 * do iput_async and there's no guarantee when taskq will be
		 * empty.
		 */
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zfsvfs->z_os)), 0);
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zfsvfs->z_os)), 0);
	}
}
/*
 * Called once all other unmount released tear down has occurred.
 * It is our responsibility to release any remaining infrastructure.
 */
/*ARGSUSED*/
int
zfs_umount(struct super_block *sb)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	objset_t *os;

	if (zfsvfs->z_arc_prune != NULL)
		arc_remove_prune_callback(zfsvfs->z_arc_prune);
	VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
	os = zfsvfs->z_os;
	zpl_bdi_destroy(sb);

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zfsvfs.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_disown(os, B_TRUE, zfsvfs);
	}

	zfsvfs_free(zfsvfs);
	return (0);
}
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	vfs_t *vfsp;
	int error;

	error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
	if (error)
		return (error);

	zfs_unregister_callbacks(zfsvfs);
	zfsvfs_vfs_free(zfsvfs->z_vfs);

	vfsp->vfs_data = zfsvfs;
	zfsvfs->z_vfs = vfsp;
	(void) zfs_register_callbacks(vfsp);

	return (error);
}
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	znode_t	*zp;
	uint64_t object = 0;
	uint64_t fid_gen = 0;
	uint64_t gen_mask;
	uint64_t zp_gen;
	int i, err;

	*ipp = NULL;

	ZFS_ENTER(zfsvfs);

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/* LONG_FID_LEN means snapdirs */
	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		ZFS_EXIT(zfsvfs);

		if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
			dprintf("snapdir fid: objsetid (%llu) != "
			    "ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
			    objsetid, ZFSCTL_INO_SNAPDIRS, object);

			return (SET_ERROR(EINVAL));
		}

		if (fid_gen > 1 || setgen != 0) {
			dprintf("snapdir fid: fid_gen (%llu) and setgen "
			    "(%llu)\n", fid_gen, setgen);
			return (SET_ERROR(EINVAL));
		}

		return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
	}

	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zfsvfs->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);
		} else {
			igrab(*ipp);
		}
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zfsvfs, object, &zp))) {
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/* Don't export xattr stuff */
	if (zp->z_pflags & ZFS_XATTR) {
		iput(ZTOI(zp));
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
	}

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if (zp_gen == 0)
		zp_gen = 1;
	if ((fid_gen == 0) && (zfsvfs->z_root == object))
		fid_gen = zp_gen;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
		    fid_gen);
		iput(ZTOI(zp));
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
	}

	*ipp = ZTOI(zp);
	if (*ipp)
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zfsvfs);
	return (0);
}
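/*
 * Illustrative sketch -- not part of the original file.  The byte-by-byte
 * loops above assemble little-endian integers from the fid's uint8_t
 * arrays; an equivalent standalone helper would be:
 */
static uint64_t
zfs_fid_decode_example(const uint8_t *bytes, int nbytes)
{
	uint64_t value = 0;
	int i;

	for (i = 0; i < nbytes; i++)
		value |= ((uint64_t)bytes[i]) << (8 * i);

	return (value);
}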
/*
 * Block out VFS ops and close zfsvfs_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.  We leave ownership of the underlying
 * dataset and objset intact so that they can be atomically handed off during
 * a subsequent rollback or recv operation and the resume thereafter.
 */
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
	int error;

	if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
		return (error);

	return (0);
}
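/*
 * Illustrative usage sketch -- not part of the original file.
 * zfs_suspend_fs() and zfs_resume_fs() bracket operations such as rollback
 * or receive; the work between the two calls is elided here.
 */
static int
zfs_suspend_resume_example(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
	int error;

	if ((error = zfs_suspend_fs(zfsvfs)) != 0)
		return (error);

	/* ... roll back or receive into the owned dataset here ... */

	return (zfs_resume_fs(zfsvfs, ds));
}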
/*
 * Rebuild SA and release VOPs.  Note that ownership of the underlying dataset
 * is an invariant across any of the operations that can be performed while the
 * filesystem was suspended.  Whether it succeeded or failed, the preconditions
 * are the same: the relevant objset and associated dataset are owned by
 * zfsvfs, held, and long held on entry.
 */
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
	int err, err2;
	znode_t *zp;
	objset_t *os;

	ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));

	/*
	 * We already own this, so just update the objset_t, as the one we
	 * had before may have been evicted.
	 */
	VERIFY3P(ds->ds_owner, ==, zfsvfs);
	VERIFY(dsl_dataset_long_held(ds));
	VERIFY0(dmu_objset_from_ds(ds, &os));

	err = zfsvfs_init(zfsvfs, os);
	if (err != 0)
		goto bail;

	VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);

	zfs_set_fuid_feature(zfsvfs);
	zfsvfs->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp;
	    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
	rrm_exit(&zfsvfs->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zfsvfs->z_os)
			(void) zfs_umount(zfsvfs->z_sb);
	}
	return (err);
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
	int error;
	objset_t *os = zfsvfs->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zfsvfs->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zfsvfs->z_os)))
		return (SET_ERROR(ENOTSUP));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);

	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
	    "from %llu to %llu", zfsvfs->z_version, newvers);

	dmu_tx_commit(tx);

	zfsvfs->z_version = newvers;

	zfs_set_fuid_feature(zfsvfs);

	return (0);
}
/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	const char *pname;
	int error = SET_ERROR(ENOENT);

	/*
	 * Look up the file system's value for the property.  For the
	 * version property, we look up a slightly different string.
	 */
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
	else
		pname = zfs_prop_to_name(prop);

	if (os != NULL) {
		ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
		error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
	}

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		case ZFS_PROP_ACLTYPE:
			*value = ZFS_ACLTYPE_OFF;
			break;
		default:
			return (error);
		}
		error = 0;
	}
	return (error);
}
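/*
 * Illustrative usage sketch -- not part of the original file.  Reading the
 * ZPL version; when nothing is stored in the master node, the documented
 * default (ZPL_VERSION) is returned instead.
 */
static uint64_t
zfs_zpl_version_example(objset_t *os)
{
	uint64_t vers = 0;

	(void) zfs_get_zplprop(os, ZFS_PROP_VERSION, &vers);

	return (vers);
}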
/*
 * Return true if the corresponding vfs's unmounted flag is set.
 * Otherwise return false.
 * If this function returns true we know VFS unmount has been initiated.
 */
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
	zfsvfs_t *zfvp;
	boolean_t unmounted = B_FALSE;

	ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);

	mutex_enter(&os->os_user_ptr_lock);
	zfvp = dmu_objset_get_user(os);
	if (zfvp != NULL && zfvp->z_unmounted)
		unmounted = B_TRUE;
	mutex_exit(&os->os_user_ptr_lock);

	return (unmounted);
}
void
zfs_init(void)
{
	zfsctl_init();
	zfs_znode_init();
	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
	register_filesystem(&zpl_fs_type);
}

void
zfs_fini(void)
{
	/*
	 * we don't use outstanding because zpl_posix_acl_free might add more.
	 */
	taskq_wait(system_delay_taskq);
	taskq_wait(system_taskq);
	unregister_filesystem(&zpl_fs_type);
	zfs_znode_fini();
	zfsctl_fini();
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_userspace_one);
EXPORT_SYMBOL(zfs_userspace_many);
EXPORT_SYMBOL(zfs_set_userquota);
EXPORT_SYMBOL(zfs_owner_overquota);
EXPORT_SYMBOL(zfs_fuid_overquota);
EXPORT_SYMBOL(zfs_fuid_overobjquota);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif