 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in the
 * future.  The elements are built using the GFS primitives, as the hierarchy
 * does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 *	ctldir ------> snapshotdir -------> snapshot
 *                                             |
 *                                             V
 *                                         mounted fs
 *
 * The 'snapshot' node contains just enough information to look up '..' and
 * act as a mountpoint for the snapshot.  Whenever we look up a specific
 * snapshot, we perform an automount of the underlying filesystem and return
 * the corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from user land.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (i.e. '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
 * (i.e. snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted filesystems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
 */
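
/*
 * Illustrative example of the behavior described above (hypothetical
 * dataset names): listing /pool/fs/.zfs/snapshot returns the snapshot
 * names via readdir, and the first lookup of
 * /pool/fs/.zfs/snapshot/<snapname> automounts pool/fs@<snapname>
 * on that directory.
 */
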
#include <sys/types.h>
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/dirent.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/sysproto.h>

#include "zfs_namecheck.h"

#include <sys/kernel.h>
#include <sys/ccompat.h>

/* Common access mode for all virtual directories under the ctldir */
const uint16_t zfsctl_ctldir_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
    S_IROTH | S_IXOTH;

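/*
 * Taken together these bits are mode 0555: read and search (execute)
 * permission for owner, group, and other, and no write access anywhere
 * under '.zfs'.
 */
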
/*
 * "Synthetic" filesystem implementation.
 */

/*
 * Assert that A implies B.
 */
#define	KASSERT_IMPLY(A, B, msg)	KASSERT(!(A) || (B), (msg));

static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");

typedef struct sfs_node {
	char		sn_name[ZFS_MAX_DATASET_NAME_LEN];
	uint64_t	sn_parent_id;
	uint64_t	sn_id;
} sfs_node_t;

/*
 * Check the parent's ID as well as the node's to account for the chance
 * that IDs originating from different domains (snapshot IDs, artificial
 * IDs, znode IDs) may clash.
 */
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
	sfs_node_t *n1 = vp->v_data;
	sfs_node_t *n2 = arg;
	bool equal;

	equal = n1->sn_id == n2->sn_id &&
	    n1->sn_parent_id == n2->sn_parent_id;

	/* Zero means equality. */
	return (equal ? 0 : 1);
}

static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	sfs_node_t search;
	int err;

	search.sn_id = id;
	search.sn_parent_id = parent_id;
	err = vfs_hash_get(mp, (uint32_t)id, flags, curthread, vpp,
	    sfs_compare_ids, &search);
	return (err);
}

static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	int err;

	KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
	err = vfs_hash_insert(vp, (uint32_t)id, flags, curthread, vpp,
	    sfs_compare_ids, vp->v_data);
	return (err);
}

static void
sfs_vnode_remove(struct vnode *vp)
{
	vfs_hash_remove(vp);
}

typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);

static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
    const char *tag, struct vop_vector *vops,
    sfs_vnode_setup_fn setup, void *arg,
    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	/* Allocate a new vnode/inode. */
	error = getnewvnode(tag, mp, vops, &vp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Exclusively lock the vnode while it's being constructed.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	setup(vp, arg);

	error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	*vpp = vp;
	return (0);
}

static void
sfs_print_node(sfs_node_t *node)
{
	printf("\tname = %s\n", node->sn_name);
	printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
	printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}

static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
	struct sfs_node *node;

	KASSERT(strlen(name) < sizeof (node->sn_name),
	    ("sfs node name is too long"));
	KASSERT(size >= sizeof (*node), ("sfs node size is too small"));
	node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);
	strlcpy(node->sn_name, name, sizeof (node->sn_name));
	node->sn_parent_id = parent_id;
	node->sn_id = id;

	return (node);
}

static void
sfs_destroy_node(sfs_node_t *node)
{
	free(node, M_SFSNODES);
}

static int
sfs_reclaim_vnode(vnode_t *vp)
{
	sfs_vnode_remove(vp);
	vp->v_data = NULL;
	return (0);
}

static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
    zfs_uio_t *uio, off_t *offp)
{
	struct dirent entry;
	int error;

	/* Reset ncookies for subsequent use of vfs_read_dirent. */
	if (ap->a_ncookies != NULL)
		*ap->a_ncookies = 0;

	if (zfs_uio_resid(uio) < sizeof (entry))
		return (SET_ERROR(EINVAL));

	if (zfs_uio_offset(uio) < 0)
		return (SET_ERROR(EINVAL));
	if (zfs_uio_offset(uio) == 0) {
		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '\0';
		entry.d_namlen = 1;
		entry.d_reclen = sizeof (entry);
		error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (zfs_uio_offset(uio) < sizeof (entry))
		return (SET_ERROR(EINVAL));
	if (zfs_uio_offset(uio) == sizeof (entry)) {
		entry.d_fileno = parent_id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '.';
		entry.d_name[2] = '\0';
		entry.d_namlen = 2;
		entry.d_reclen = sizeof (entry);
		error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (offp != NULL)
		*offp = 2 * sizeof (entry);
	return (0);
}

/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			1
 *	.zfs/snapshot		2
 *	.zfs/snapshot/<snap>	objectid(snap)
 */
#define	ZFSCTL_INO_SNAP(id)	(id)
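
/*
 * Sketch of the effect (assuming ZFSCTL_INO_ROOT == 1 and
 * ZFSCTL_INO_SNAPDIR == 2, as the table above implies): stat(2) reports
 * st_ino 1 for '.zfs', 2 for '.zfs/snapshot', and the snapshot's object
 * id for '.zfs/snapshot/<snap>'.
 */
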
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;

boolean_t
zfsctl_is_node(vnode_t *vp)
{
	return (vn_matchops(vp, zfsctl_ops_root) ||
	    vn_matchops(vp, zfsctl_ops_snapdir) ||
	    vn_matchops(vp, zfsctl_ops_snapshot));
}

typedef struct zfsctl_root {
	sfs_node_t	node;
	sfs_node_t	*snapdir;
	timestruc_t	cmtime;
} zfsctl_root_t;

/*
 * Create the '.zfs' directory.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	zfsctl_root_t *dot_zfs;
	sfs_node_t *snapdir;
	vnode_t *rvp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
	    ZFSCTL_INO_SNAPDIR);
	dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof (*dot_zfs), ".zfs", 0,
	    ZFSCTL_INO_ROOT);
	dot_zfs->snapdir = snapdir;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof (crtime)));
	ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
	vput(rvp);

	zfsvfs->z_ctldir = dot_zfs;
}

/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
 * The nodes must not have any associated vnodes by now as they should be
 * vflush-ed.
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
	sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}

static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	return (VFS_ROOT(mp, flags, vpp));
}

static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{
	ASSERT_VOP_ELOCKED(vp, __func__);

	/* We support shared locking. */
	VN_LOCK_ASHARE(vp);
	vp->v_type = VDIR;
	vp->v_data = arg;
}

static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
	err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
	    zfsctl_common_vnode_setup, node, vpp);
	return (err);
}

static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
	err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
	    &zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
	return (err);
}

/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
	int error;

	error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
	return (error);
}

/*
 * Common open routine.  Disallow any write access.
 */
static int
zfsctl_common_open(struct vop_open_args *ap)
{
	int flags = ap->a_mode;

	if (flags & FWRITE)
		return (SET_ERROR(EACCES));

	return (0);
}

/*
 * Common close routine.  Nothing to do here.
 */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Common access routine.  Disallow writes.
 */
static int
zfsctl_common_access(struct vop_access_args *ap)
{
	accmode_t accmode = ap->a_accmode;

	if (accmode & VWRITE)
		return (SET_ERROR(EACCES));

	return (0);
}

/*
 * Common getattr function.  Fill in basic information.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	timestruc_t now;
	sfs_node_t *node;

	node = vp->v_data;

	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_mode = zfsctl_ctldir_mode;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;

	vap->va_nodeid = node->sn_id;

	/* At least '.' and '..'. */
	vap->va_nlink = 2;
}

#ifndef _OPENSOLARIS_SYS_VNODE_H_
struct vop_fid_args {
	struct vnode *a_vp;
	struct fid *a_fid;
};
#endif

static int
zfsctl_common_fid(struct vop_fid_args *ap)
{
	vnode_t *vp = ap->a_vp;
	fid_t *fidp = (void *)ap->a_fid;
	sfs_node_t *node = vp->v_data;
	uint64_t object = node->sn_id;
	zfid_short_t *zfid;
	int i;

	zfid = (zfid_short_t *)fidp;
	zfid->zf_len = SHORT_FID_LEN;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs nodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
	struct vnode *a_vp;
	struct thread *a_td;
};
#endif

static int
zfsctl_common_reclaim(struct vop_reclaim_args *ap)
{
	vnode_t *vp = ap->a_vp;

	(void) sfs_reclaim_vnode(vp);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct vop_print_args {
	struct vnode *a_vp;
};
#endif

static int
zfsctl_common_print(struct vop_print_args *ap)
{
	sfs_print_node(ap->a_vp->v_data);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
};
#endif

/*
 * Get root directory attributes.
 */
static int
zfsctl_root_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsctl_root_t *node = vp->v_data;

	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = node->cmtime;
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	vap->va_nlink += 1; /* snapdir */
	vap->va_size = vap->va_nlink;
	return (0);
}

/*
 * When we look up "." we still can be asked to lock it
 * differently, can't we?
 */
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
	vref(dvp);
	if (ltype != VOP_ISLOCKED(dvp)) {
		if (ltype == LK_EXCLUSIVE)
			vn_lock(dvp, LK_UPGRADE | LK_RETRY);
		else /* if (ltype == LK_SHARED) */
			vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);

		/* Relock for the "." case may have left us with reclaimed vnode. */
		if (VN_IS_DOOMED(dvp)) {
			vrele(dvp);
			return (SET_ERROR(ENOENT));
		}
	}
	return (0);
}

/*
 * Special case the handling of "..".
 */
static int
zfsctl_root_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	int flags = ap->a_cnp->cn_flags;
	int lkflags = ap->a_cnp->cn_lkflags;
	int nameiop = ap->a_cnp->cn_nameiop;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
	} else if ((flags & ISDOTDOT) != 0) {
		err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
		    lkflags, vpp);
	} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
		err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
	} else {
		err = SET_ERROR(ENOENT);
	}
	if (err != 0)
		*vpp = NULL;
	return (err);
}

static int
zfsctl_root_readdir(struct vop_readdir_args *ap)
{
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_root_t *node = vp->v_data;
	zfs_uio_t uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	zfs_uio_init(&uio, ap->a_uio);

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}
	if (zfs_uio_offset(&uio) != dots_offset)
		return (SET_ERROR(EINVAL));

	CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name));
	entry.d_fileno = node->snapdir->sn_id;
	entry.d_type = DT_DIR;
	strcpy(entry.d_name, node->snapdir->sn_name);
	entry.d_namlen = strlen(entry.d_name);
	entry.d_reclen = sizeof (entry);
	error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
	if (error != 0) {
		if (error == ENAMETOOLONG)
			error = 0;
		return (SET_ERROR(error));
	}
	if (eofp != NULL)
		*eofp = 1;
	return (0);
}

static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
	static const char dotzfs_name[4] = ".zfs";
	vnode_t *dvp;
	int error;

	if (*ap->a_buflen < sizeof (dotzfs_name))
		return (SET_ERROR(ENOMEM));

	error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
	    LK_SHARED, &dvp);
	if (error != 0)
		return (SET_ERROR(error));

	VOP_UNLOCK1(dvp);
	*ap->a_vpp = dvp;
	*ap->a_buflen -= sizeof (dotzfs_name);
	bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
	return (0);
}

static int
zfsctl_common_pathconf(struct vop_pathconf_args *ap)
{
	/*
	 * We care about ACL variables so that user land utilities like ls
	 * can display them correctly.  Since the ctldir's st_dev is set to be
	 * the same as the parent dataset, we must support all variables that
	 * it supports.
	 */
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = MIN(LONG_MAX, ZFS_LINK_MAX);
		return (0);

	case _PC_FILESIZEBITS:
		*ap->a_retval = 64;
		return (0);

	case _PC_MIN_HOLE_SIZE:
		*ap->a_retval = (int)SPA_MINBLOCKSIZE;
		return (0);

	case _PC_ACL_EXTENDED:
		*ap->a_retval = 0;
		return (0);

	case _PC_ACL_NFS4:
		*ap->a_retval = 1;
		return (0);

	case _PC_ACL_PATH_MAX:
		*ap->a_retval = ACL_MAX_ENTRIES;
		return (0);

	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);

	default:
		return (vop_stdpathconf(ap));
	}
}

/*
 * Returns a trivial ACL.
 */
static int
zfsctl_common_getacl(struct vop_getacl_args *ap)
{
	int i;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	acl_nfs4_sync_acl_from_mode(ap->a_aclp, zfsctl_ctldir_mode, 0);
	/*
	 * acl_nfs4_sync_acl_from_mode assumes that the owner can always modify
	 * attributes.  That is not the case for the ctldir, so we must clear
	 * those bits.  We also must clear ACL_READ_NAMED_ATTRS, because xattrs
	 * aren't supported by the ctldir.
	 */
	for (i = 0; i < ap->a_aclp->acl_cnt; i++) {
		struct acl_entry *entry;
		entry = &(ap->a_aclp->acl_entry[i]);
		entry->ae_perm &= ~(ACL_WRITE_ACL | ACL_WRITE_OWNER |
		    ACL_WRITE_ATTRIBUTES | ACL_WRITE_NAMED_ATTRS |
		    ACL_READ_NAMED_ATTRS);
	}

	return (0);
}

static struct vop_vector zfsctl_ops_root = {
	.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
	.vop_fplookup_vexec = VOP_EAGAIN,
#endif
	.vop_open = zfsctl_common_open,
	.vop_close = zfsctl_common_close,
	.vop_ioctl = VOP_EINVAL,
	.vop_getattr = zfsctl_root_getattr,
	.vop_access = zfsctl_common_access,
	.vop_readdir = zfsctl_root_readdir,
	.vop_lookup = zfsctl_root_lookup,
	.vop_inactive = VOP_NULL,
	.vop_reclaim = zfsctl_common_reclaim,
	.vop_fid = zfsctl_common_fid,
	.vop_print = zfsctl_common_print,
	.vop_vptocnp = zfsctl_root_vptocnp,
	.vop_pathconf = zfsctl_common_pathconf,
	.vop_getacl = zfsctl_common_getacl,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_root);

static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}

static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
	int err;

	err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
	return (err);
}

/*
 * Given a vnode get a root vnode of a filesystem mounted on top of
 * the vnode, if any.  The root vnode is referenced and locked.
 * If no filesystem is mounted then the original vnode remains referenced
 * and locked.  If any error happens the original vnode is unlocked and
 * released.
 */
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
	struct mount *mp;
	int err;

	ASSERT_VOP_LOCKED(*vpp, __func__);
	ASSERT3S((*vpp)->v_type, ==, VDIR);

	if ((mp = (*vpp)->v_mountedhere) != NULL) {
		err = vfs_busy(mp, 0);
		KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
		KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
		vput(*vpp);
		err = VFS_ROOT(mp, flags, vpp);
		vfs_unbusy(mp);
		return (err);
	}
	return (EJUSTRETURN);
}

typedef struct {
	const char *snap_name;
	uint64_t snap_id;
} snapshot_setup_arg_t;

static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
	snapshot_setup_arg_t *ssa = arg;
	sfs_node_t *node;

	ASSERT_VOP_ELOCKED(vp, __func__);

	node = sfs_alloc_node(sizeof (sfs_node_t),
	    ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
	zfsctl_common_vnode_setup(vp, node);

	/* We have to support recursive locking. */
	VN_LOCK_AREC(vp);
}

/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 * There are four possibilities:
 * - the snapshot node and vnode do not exist
 * - the snapshot vnode is covered by the mounted snapshot
 * - the snapshot vnode is not covered yet, the mount operation is in progress
 * - the snapshot vnode is not covered, because the snapshot has been unmounted
 * The last two states are transient and should be relatively short-lived.
 */
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char name[NAME_MAX + 1];
	char fullname[ZFS_MAX_DATASET_NAME_LEN];
	char *mountpoint;
	size_t mountpoint_len;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	uint64_t snap_id;
	int nameiop = cnp->cn_nameiop;
	int lkflags = cnp->cn_lkflags;
	int flags = cnp->cn_flags;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
		return (err);
	}
	if (flags & ISDOTDOT) {
		err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
		    vpp);
		return (err);
	}

	if (cnp->cn_namelen >= sizeof (name))
		return (SET_ERROR(ENAMETOOLONG));

	strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
	err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
	if (err != 0)
		return (SET_ERROR(ENOENT));

	for (;;) {
		snapshot_setup_arg_t ssa;

		ssa.snap_name = name;
		ssa.snap_id = snap_id;
		err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
		    snap_id, "zfs", &zfsctl_ops_snapshot,
		    zfsctl_snapshot_vnode_setup, &ssa, vpp);
		if (err != 0)
			return (err);

		/* Check if a new vnode has just been created. */
		if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
			break;

		/*
		 * Check if a snapshot is already mounted on top of the vnode.
		 */
		err = zfsctl_mounted_here(vpp, lkflags);
		if (err != EJUSTRETURN)
			return (err);

		/*
		 * If the vnode is not covered, then either the mount operation
		 * is in progress or the snapshot has already been unmounted
		 * but the vnode hasn't been inactivated and reclaimed yet.
		 * We can try to re-use the vnode in the latter case.
		 */
		VI_LOCK(*vpp);
		if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
			/*
			 * Upgrade to exclusive lock in order to:
			 * - avoid race conditions
			 * - satisfy the contract of mount_snapshot()
			 */
			err = VOP_LOCK(*vpp, LK_TRYUPGRADE | LK_INTERLOCK);
			if (err == 0)
				break;
		} else {
			VI_UNLOCK(*vpp);
		}

		/*
		 * In this state we can loop on uncontested locks and starve
		 * the thread doing the lengthy, non-trivial mount operation.
		 * So, yield to prevent that from happening.
		 */
		vput(*vpp);
		kern_yield(PRI_USER);
	}

	VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof (fullname), fullname));

	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, name);

	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;

		/* Clear the root flag (set via VFS_ROOT) as well. */
		(*vpp)->v_vflag &= ~VV_ROOT;
	}
	return (err);
}

static int
zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfs_uio_t uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	zfs_uio_init(&uio, ap->a_uio);

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
	    &uio, &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}

	for (;;) {
		uint64_t cookie;
		uint64_t id;

		cookie = zfs_uio_offset(&uio) - dots_offset;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT) {
				if (eofp != NULL)
					*eofp = 1;
				error = 0;
			}
			return (error);
		}

		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		strcpy(entry.d_name, snapname);
		entry.d_namlen = strlen(entry.d_name);
		entry.d_reclen = sizeof (entry);
		error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
		if (error != 0) {
			if (error == ENAMETOOLONG)
				error = 0;
			return (SET_ERROR(error));
		}
		zfs_uio_setoffset(&uio, cookie + dots_offset);
	}
}

static int
zfsctl_snapdir_getattr(struct vop_getattr_args *ap)
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	dsl_dataset_t *ds;
	uint64_t snap_count;
	int err;

	ds = dmu_objset_ds(zfsvfs->z_os);
	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
		err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
		if (err != 0)
			return (err);
		vap->va_nlink += snap_count;
	}
	vap->va_size = vap->va_nlink;

	return (0);
}

static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
	.vop_fplookup_vexec = VOP_EAGAIN,
#endif
	.vop_open = zfsctl_common_open,
	.vop_close = zfsctl_common_close,
	.vop_getattr = zfsctl_snapdir_getattr,
	.vop_access = zfsctl_common_access,
	.vop_readdir = zfsctl_snapdir_readdir,
	.vop_lookup = zfsctl_snapdir_lookup,
	.vop_reclaim = zfsctl_common_reclaim,
	.vop_fid = zfsctl_common_fid,
	.vop_print = zfsctl_common_print,
	.vop_pathconf = zfsctl_common_pathconf,
	.vop_getacl = zfsctl_common_getacl,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapdir);

static int
zfsctl_snapshot_inactive(struct vop_inactive_args *ap)
{
	vnode_t *vp = ap->a_vp;

	VERIFY(vrecycle(vp) == 1);
	return (0);
}

static int
zfsctl_snapshot_reclaim(struct vop_reclaim_args *ap)
{
	vnode_t *vp = ap->a_vp;
	void *data = vp->v_data;

	sfs_reclaim_vnode(vp);
	sfs_destroy_node(data);
	return (0);
}

static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
	struct mount *mp;
	vnode_t *dvp;
	vnode_t *vp = ap->a_vp;
	sfs_node_t *node = vp->v_data;
	size_t len;
	int locked;
	int error;

	len = strlen(node->sn_name);
	if (*ap->a_buflen < len)
		return (SET_ERROR(ENOMEM));

	/*
	 * Prevent unmounting of the snapshot while the vnode lock
	 * is not held.  That is not strictly required, but allows
	 * us to assert that an uncovered snapshot vnode is never
	 * "leaked".
	 */
	mp = vp->v_mountedhere;
	if (mp == NULL)
		return (SET_ERROR(ENOENT));
	error = vfs_busy(mp, 0);
	KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));

	/*
	 * We can vput the vnode as we can now depend on the reference owned
	 * by the busied mp.  But we also need to hold the vnode, because
	 * the reference may go after vfs_unbusy() which has to be called
	 * before we can lock the vnode again.
	 */
	locked = VOP_ISLOCKED(vp);
#if __FreeBSD_version >= 1300045
	enum vgetstate vs = vget_prep(vp);
#else
	vhold(vp);
#endif
	vput(vp);

	/* Look up .zfs/snapshot, our parent. */
	error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
	if (error == 0) {
		VOP_UNLOCK1(dvp);
		*ap->a_vpp = dvp;
		*ap->a_buflen -= len;
		bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
	}
	vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
	vget_finish(vp, locked | LK_RETRY, vs);
#else
	vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
#endif
	return (error);
}

/*
 * These VP's should never see the light of day.  They should always
 * be covered.
 */
static struct vop_vector zfsctl_ops_snapshot = {
	.vop_default = NULL, /* ensure very restricted access */
#if __FreeBSD_version >= 1300121
	.vop_fplookup_vexec = VOP_EAGAIN,
#endif
	.vop_inactive = zfsctl_snapshot_inactive,
#if __FreeBSD_version >= 1300045
	.vop_need_inactive = vop_stdneed_inactive,
#endif
	.vop_reclaim = zfsctl_snapshot_reclaim,
	.vop_vptocnp = zfsctl_snapshot_vptocnp,
	.vop_lock1 = vop_stdlock,
	.vop_unlock = vop_stdunlock,
	.vop_islocked = vop_stdislocked,
	.vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */
	.vop_print = zfsctl_common_print,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapshot);

int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs __unused = vfsp->vfs_data;
	vnode_t *vp;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	*zfsvfsp = NULL;
	error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
	    ZFSCTL_INO_SNAPDIR, objsetid, &vp);
	if (error == 0 && vp != NULL) {
		/*
		 * XXX Probably need to at least reference, if not busy, the mp.
		 */
		if (vp->v_mountedhere != NULL)
			*zfsvfsp = vp->v_mountedhere->mnt_data;
		vput(vp);
	}
	if (*zfsvfsp == NULL)
		return (SET_ERROR(EINVAL));
	return (0);
}

/*
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
 */
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	struct mount *mp;
	vnode_t *vp;
	uint64_t cookie;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);

	cookie = 0;
	for (;;) {
		uint64_t id;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT)
				error = 0;
			break;
		}

		for (;;) {
			error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
			    ZFSCTL_INO_SNAPDIR, id, &vp);
			if (error != 0 || vp == NULL)
				break;

			mp = vp->v_mountedhere;

			/*
			 * v_mountedhere being NULL means that the
			 * (uncovered) vnode is in a transient state
			 * (mounting or unmounting), so loop until it
			 * stabilizes.
			 */
			if (mp != NULL)
				break;
			vput(vp);
			kern_yield(PRI_USER);
		}
		if (error != 0)
			break;
		if (vp == NULL)
			continue;	/* no mountpoint, nothing to do */

		/*
		 * The mount-point vnode is kept locked to avoid spurious EBUSY
		 * from a concurrent umount.
		 * The vnode lock must have recursive locking enabled.
		 */
		vfs_ref(mp);
		error = dounmount(mp, fflags, curthread);
		KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
		    ("extra references after unmount"));
		vput(vp);
		if (error != 0)
			break;
	}
	KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
	    ("force unmounting failed"));
	return (error);
}

int
zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
{
	vfs_t *vfsp = NULL;
	zfsvfs_t *zfsvfs = NULL;

	if (strchr(snapname, '@') == NULL)
		return (0);

	int err = getzfsvfs(snapname, &zfsvfs);
	if (err != 0) {
		ASSERT3P(zfsvfs, ==, NULL);
		return (0);
	}
	vfsp = zfsvfs->z_vfs;

	ASSERT(!dsl_pool_config_held(dmu_objset_pool(zfsvfs->z_os)));

	vfs_ref(vfsp);
	vfs_unbusy(vfsp);
	return (dounmount(vfsp, MS_FORCE, curthread));
}