*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
+#include <sys/zfs_vnops.h>
+#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/kidmap.h>
+#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
-/*
- * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
- * be freed before it can be safely accessed.
- */
-krwlock_t zfsvfs_lock;
static kmem_cache_t *znode_cache = NULL;
-/*ARGSUSED*/
-static void
-znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
-{
- /*
- * We should never drop all dbuf refs without first clearing
- * the eviction callback.
- */
- panic("evicting znode %p\n", user_ptr);
-}
-
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
-
- zp->z_vnode = vn_alloc(kmflags);
- if (zp->z_vnode == NULL) {
- return (-1);
- }
- ZTOV(zp)->v_data = zp;
-
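+	/* One-time initialization of the embedded Linux inode. */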
+ inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
+ rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&zp->z_range_avl, zfs_range_compare,
zp->z_dirlocks = NULL;
zp->z_acl_cached = NULL;
+ zp->z_xattr_cached = NULL;
+ zp->z_xattr_parent = NULL;
zp->z_moved = 0;
return (0);
}
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- ASSERT(ZTOV(zp)->v_data == zp);
- vn_free(ZTOV(zp));
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
rw_destroy(&zp->z_name_lock);
mutex_destroy(&zp->z_acl_lock);
+ rw_destroy(&zp->z_xattr_lock);
avl_destroy(&zp->z_range_avl);
mutex_destroy(&zp->z_range_lock);
ASSERT(zp->z_dirlocks == NULL);
ASSERT(zp->z_acl_cached == NULL);
-}
-
-#ifdef ZNODE_STATS
-static struct {
- uint64_t zms_zfsvfs_invalid;
- uint64_t zms_zfsvfs_recheck1;
- uint64_t zms_zfsvfs_unmounted;
- uint64_t zms_zfsvfs_recheck2;
- uint64_t zms_obj_held;
- uint64_t zms_vnode_locked;
- uint64_t zms_not_only_dnlc;
-} znode_move_stats;
-#endif /* ZNODE_STATS */
-
-static void
-zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
-{
- vnode_t *vp;
-
- /* Copy fields. */
- nzp->z_zfsvfs = ozp->z_zfsvfs;
-
- /* Swap vnodes. */
- vp = nzp->z_vnode;
- nzp->z_vnode = ozp->z_vnode;
- ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
- ZTOV(ozp)->v_data = ozp;
- ZTOV(nzp)->v_data = nzp;
-
- nzp->z_id = ozp->z_id;
- ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
- ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
- nzp->z_unlinked = ozp->z_unlinked;
- nzp->z_atime_dirty = ozp->z_atime_dirty;
- nzp->z_zn_prefetch = ozp->z_zn_prefetch;
- nzp->z_blksz = ozp->z_blksz;
- nzp->z_seq = ozp->z_seq;
- nzp->z_mapcnt = ozp->z_mapcnt;
- nzp->z_gen = ozp->z_gen;
- nzp->z_sync_cnt = ozp->z_sync_cnt;
- nzp->z_is_sa = ozp->z_is_sa;
- nzp->z_sa_hdl = ozp->z_sa_hdl;
- bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
- nzp->z_links = ozp->z_links;
- nzp->z_size = ozp->z_size;
- nzp->z_pflags = ozp->z_pflags;
- nzp->z_uid = ozp->z_uid;
- nzp->z_gid = ozp->z_gid;
- nzp->z_mode = ozp->z_mode;
-
- /*
- * Since this is just an idle znode and kmem is already dealing with
- * memory pressure, release any cached ACL.
- */
- if (ozp->z_acl_cached) {
- zfs_acl_free(ozp->z_acl_cached);
- ozp->z_acl_cached = NULL;
- }
-
- sa_set_userp(nzp->z_sa_hdl, nzp);
-
- /*
- * Invalidate the original znode by clearing fields that provide a
- * pointer back to the znode. Set the low bit of the vfs pointer to
- * ensure that zfs_znode_move() recognizes the znode as invalid in any
- * subsequent callback.
- */
- ozp->z_sa_hdl = NULL;
- POINTER_INVALIDATE(&ozp->z_zfsvfs);
-
- /*
- * Mark the znode.
- */
- nzp->z_moved = 1;
- ozp->z_moved = (uint8_t)-1;
-}
-
-/*ARGSUSED*/
-static kmem_cbrc_t
-zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
-{
- znode_t *ozp = buf, *nzp = newbuf;
- zfsvfs_t *zfsvfs;
- vnode_t *vp;
-
- /*
- * The znode is on the file system's list of known znodes if the vfs
- * pointer is valid. We set the low bit of the vfs pointer when freeing
- * the znode to invalidate it, and the memory patterns written by kmem
- * (baddcafe and deadbeef) set at least one of the two low bits. A newly
- * created znode sets the vfs pointer last of all to indicate that the
- * znode is known and in a valid state to be moved by this function.
- */
- zfsvfs = ozp->z_zfsvfs;
- if (!POINTER_IS_VALID(zfsvfs)) {
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
- return (KMEM_CBRC_DONT_KNOW);
- }
-
- /*
- * Close a small window in which it's possible that the filesystem could
- * be unmounted and freed, and zfsvfs, though valid in the previous
- * statement, could point to unrelated memory by the time we try to
- * prevent the filesystem from being unmounted.
- */
- rw_enter(&zfsvfs_lock, RW_WRITER);
- if (zfsvfs != ozp->z_zfsvfs) {
- rw_exit(&zfsvfs_lock);
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
- return (KMEM_CBRC_DONT_KNOW);
- }
-
- /*
- * If the znode is still valid, then so is the file system. We know that
- * no valid file system can be freed while we hold zfsvfs_lock, so we
- * can safely ensure that the filesystem is not and will not be
- * unmounted. The next statement is equivalent to ZFS_ENTER().
- */
- rrw_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
- if (zfsvfs->z_unmounted) {
- ZFS_EXIT(zfsvfs);
- rw_exit(&zfsvfs_lock);
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
- return (KMEM_CBRC_DONT_KNOW);
- }
- rw_exit(&zfsvfs_lock);
-
- mutex_enter(&zfsvfs->z_znodes_lock);
- /*
- * Recheck the vfs pointer in case the znode was removed just before
- * acquiring the lock.
- */
- if (zfsvfs != ozp->z_zfsvfs) {
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
- return (KMEM_CBRC_DONT_KNOW);
- }
-
- /*
- * At this point we know that as long as we hold z_znodes_lock, the
- * znode cannot be freed and fields within the znode can be safely
- * accessed. Now, prevent a race with zfs_zget().
- */
- if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
- return (KMEM_CBRC_LATER);
- }
-
- vp = ZTOV(ozp);
- if (mutex_tryenter(&vp->v_lock) == 0) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
- return (KMEM_CBRC_LATER);
- }
-
- /* Only move znodes that are referenced _only_ by the DNLC. */
- if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
- mutex_exit(&vp->v_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
- return (KMEM_CBRC_LATER);
- }
-
- /*
- * The znode is known and in a valid state to move. We're holding the
- * locks needed to execute the critical section.
- */
- zfs_znode_move_impl(ozp, nzp);
- mutex_exit(&vp->v_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
-
- list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
-
- return (KMEM_CBRC_YES);
+ ASSERT(zp->z_xattr_cached == NULL);
+ ASSERT(zp->z_xattr_parent == NULL);
}
void
/*
* Initialize zcache
*/
- rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
ASSERT(znode_cache == NULL);
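+	/* KMC_KMEM asks the SPL to back this cache with kmem (slab) allocations. */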
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
- zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
- kmem_cache_set_move(znode_cache, zfs_znode_move);
+ zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_KMEM);
}
void
zfs_znode_fini(void)
{
- /*
- * Cleanup vfs & vnode ops
- */
- zfs_remove_op_tables();
-
/*
* Cleanup zcache
*/
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
- rw_destroy(&zfsvfs_lock);
-}
-
-struct vnodeops *zfs_dvnodeops;
-struct vnodeops *zfs_fvnodeops;
-struct vnodeops *zfs_symvnodeops;
-struct vnodeops *zfs_xdvnodeops;
-struct vnodeops *zfs_evnodeops;
-struct vnodeops *zfs_sharevnodeops;
-
-void
-zfs_remove_op_tables()
-{
- /*
- * Remove vfs ops
- */
- ASSERT(zfsfstype);
- (void) vfs_freevfsops_by_type(zfsfstype);
- zfsfstype = 0;
-
- /*
- * Remove vnode ops
- */
- if (zfs_dvnodeops)
- vn_freevnodeops(zfs_dvnodeops);
- if (zfs_fvnodeops)
- vn_freevnodeops(zfs_fvnodeops);
- if (zfs_symvnodeops)
- vn_freevnodeops(zfs_symvnodeops);
- if (zfs_xdvnodeops)
- vn_freevnodeops(zfs_xdvnodeops);
- if (zfs_evnodeops)
- vn_freevnodeops(zfs_evnodeops);
- if (zfs_sharevnodeops)
- vn_freevnodeops(zfs_sharevnodeops);
-
- zfs_dvnodeops = NULL;
- zfs_fvnodeops = NULL;
- zfs_symvnodeops = NULL;
- zfs_xdvnodeops = NULL;
- zfs_evnodeops = NULL;
- zfs_sharevnodeops = NULL;
-}
-
-extern const fs_operation_def_t zfs_dvnodeops_template[];
-extern const fs_operation_def_t zfs_fvnodeops_template[];
-extern const fs_operation_def_t zfs_xdvnodeops_template[];
-extern const fs_operation_def_t zfs_symvnodeops_template[];
-extern const fs_operation_def_t zfs_evnodeops_template[];
-extern const fs_operation_def_t zfs_sharevnodeops_template[];
-
-int
-zfs_create_op_tables()
-{
- int error;
-
- /*
- * zfs_dvnodeops can be set if mod_remove() calls mod_installfs()
- * due to a failure to remove the the 2nd modlinkage (zfs_modldrv).
- * In this case we just return as the ops vectors are already set up.
- */
- if (zfs_dvnodeops)
- return (0);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
- &zfs_dvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
- &zfs_fvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template,
- &zfs_symvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template,
- &zfs_xdvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template,
- &zfs_evnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_sharevnodeops_template,
- &zfs_sharevnodeops);
-
- return (error);
}
int
-zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
{
+#ifdef HAVE_SMB_SHARE
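+	/* The shares directory is only created when SMB support is built. */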
zfs_acl_ids_t acl_ids;
vattr_t vattr;
znode_t *sharezp;
int error;
vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
- vattr.va_type = VDIR;
- vattr.va_mode = S_IFDIR|0555;
+ vattr.va_mode = S_IFDIR | 0555;
vattr.va_uid = crgetuid(kcred);
vattr.va_gid = crgetgid(kcred);
- sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
- ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
+ sharezp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
sharezp->z_moved = 0;
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
zfsvfs->z_shares_dir = sharezp->z_id;
zfs_acl_ids_free(&acl_ids);
- ZTOV(sharezp)->v_count = 0;
+	/* ZTOV(sharezp)->v_count = 0; */
sa_handle_destroy(sharezp->z_sa_hdl);
kmem_cache_free(znode_cache, sharezp);
return (error);
-}
-
-/*
- * define a couple of values we need available
- * for both 64 and 32 bit environments.
- */
-#ifndef NBITSMINOR64
-#define NBITSMINOR64 32
-#endif
-#ifndef MAXMAJ64
-#define MAXMAJ64 0xffffffffUL
-#endif
-#ifndef MAXMIN64
-#define MAXMIN64 0xffffffffUL
-#endif
-
-/*
- * Create special expldev for ZFS private use.
- * Can't use standard expldev since it doesn't do
- * what we want. The standard expldev() takes a
- * dev32_t in LP64 and expands it to a long dev_t.
- * We need an interface that takes a dev32_t in ILP32
- * and expands it to a long dev_t.
- */
-static uint64_t
-zfs_expldev(dev_t dev)
-{
-#ifndef _LP64
- major_t major = (major_t)dev >> NBITSMINOR32 & MAXMAJ32;
- return (((uint64_t)major << NBITSMINOR64) |
- ((minor_t)dev & MAXMIN32));
#else
- return (dev);
-#endif
-}
-
-/*
- * Special cmpldev for ZFS private use.
- * Can't use standard cmpldev since it takes
- * a long dev_t and compresses it to dev32_t in
- * LP64. We need to do a compaction of a long dev_t
- * to a dev32_t in ILP32.
- */
-dev_t
-zfs_cmpldev(uint64_t dev)
-{
-#ifndef _LP64
- minor_t minor = (minor_t)dev & MAXMIN64;
- major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
-
- if (major > MAXMAJ32 || minor > MAXMIN32)
- return (NODEV32);
-
- return (((dev32_t)major << NBITSMINOR32) | minor);
-#else
- return (dev);
-#endif
+ return (0);
+#endif /* HAVE_SMB_SHARE */
}
static void
-zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
+zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zsb, zp->z_id)));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
- /*
- * Slap on VROOT if we are the root znode
- */
- if (zp->z_id == zfsvfs->z_root)
- ZTOV(zp)->v_flag |= VROOT;
-
mutex_exit(&zp->z_lock);
- vn_exists(ZTOV(zp));
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(ZTOZSB(zp), zp->z_id)) ||
zp->z_unlinked ||
- RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
+ RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
- * Construct a new znode/vnode and intialize.
+ * Called by new_inode() to allocate a new inode.
+ */
+int
+zfs_inode_alloc(struct super_block *sb, struct inode **ip)
+{
+ znode_t *zp;
+
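+	/* The Linux inode is embedded in the znode; return that portion. */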
+ zp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
+ *ip = ZTOI(zp);
+
+ return (0);
+}
+
+/*
+ * Called in multiple places when an inode should be destroyed.
+ */
+void
+zfs_inode_destroy(struct inode *ip)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ZTOZSB(zp);
+
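+	/*
+	 * .zfs control nodes have no on-disk object; the ctldir code
+	 * tears them down.
+	 */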
+ if (zfsctl_is_node(ip))
+ zfsctl_inode_destroy(ip);
+
+ mutex_enter(&zsb->z_znodes_lock);
+ if (list_link_active(&zp->z_link_node)) {
+ list_remove(&zsb->z_all_znodes, zp);
+ zsb->z_nr_znodes--;
+ }
+ mutex_exit(&zsb->z_znodes_lock);
+
+ if (zp->z_acl_cached) {
+ zfs_acl_free(zp->z_acl_cached);
+ zp->z_acl_cached = NULL;
+ }
+
+ if (zp->z_xattr_cached) {
+ nvlist_free(zp->z_xattr_cached);
+ zp->z_xattr_cached = NULL;
+ }
+
+ if (zp->z_xattr_parent) {
+ iput(ZTOI(zp->z_xattr_parent));
+ zp->z_xattr_parent = NULL;
+ }
+
+ kmem_cache_free(znode_cache, zp);
+}
+
+static void
+zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
+{
+ uint64_t rdev = 0;
+
+ switch (ip->i_mode & S_IFMT) {
+ case S_IFREG:
+ ip->i_op = &zpl_inode_operations;
+ ip->i_fop = &zpl_file_operations;
+ ip->i_mapping->a_ops = &zpl_address_space_operations;
+ break;
+
+ case S_IFDIR:
+ ip->i_op = &zpl_dir_inode_operations;
+ ip->i_fop = &zpl_dir_file_operations;
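+		/* Directory znode prefetching is enabled by default. */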
+ ITOZ(ip)->z_zn_prefetch = B_TRUE;
+ break;
+
+ case S_IFLNK:
+ ip->i_op = &zpl_symlink_inode_operations;
+ break;
+
+ /*
+	 * rdev is only stored in the SA for device files.
+ */
+ case S_IFCHR:
+ case S_IFBLK:
+ VERIFY(sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb),
+ &rdev, sizeof (rdev)) == 0);
+ /*FALLTHROUGH*/
+ case S_IFIFO:
+ case S_IFSOCK:
+ init_special_inode(ip, ip->i_mode, rdev);
+ ip->i_op = &zpl_special_inode_operations;
+ break;
+
+ default:
+ printk("ZFS: Invalid mode: 0x%x\n", ip->i_mode);
+ VERIFY(0);
+ }
+}
+
+/*
+ * Construct a znode+inode and initialize.
*
 * This does not call dmu_set_user(); that is
 * up to the caller to do, in case you don't want to
 * return the znode.
*/
static znode_t *
-zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
- dmu_object_type_t obj_type, sa_handle_t *hdl)
+zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
+ dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
+ struct inode *dip)
{
znode_t *zp;
- vnode_t *vp;
- uint64_t mode;
+ struct inode *ip;
uint64_t parent;
sa_bulk_attr_t bulk[9];
int count = 0;
- zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ ASSERT(zsb != NULL);
+ ip = new_inode(zsb->z_sb);
+ if (ip == NULL)
+ return (NULL);
+
+ zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
+ ASSERT3P(zp->z_acl_cached, ==, NULL);
+ ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT3P(zp->z_xattr_parent, ==, NULL);
zp->z_moved = 0;
-
- /*
- * Defer setting z_zfsvfs until the znode is ready to be a candidate for
- * the zfs_znode_move() callback.
- */
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
zp->z_atime_dirty = 0;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
-
- vp = ZTOV(zp);
- vn_reinit(vp);
-
- zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
-
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
- &zp->z_links, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ zp->z_is_zvol = B_FALSE;
+ zp->z_is_mapped = B_FALSE;
+ zp->z_is_ctldir = B_FALSE;
+ zp->z_is_stale = B_FALSE;
+
+ zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
+
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
+ &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
- &zp->z_uid, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
- &zp->z_gid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
- kmem_cache_free(znode_cache, zp);
- return (NULL);
- }
-
- zp->z_mode = mode;
- vp->v_vfsp = zfsvfs->z_parent->z_vfs;
- vp->v_type = IFTOVT((mode_t)mode);
+ goto error;
+ }
- switch (vp->v_type) {
- case VDIR:
- if (zp->z_pflags & ZFS_XATTR) {
- vn_setops(vp, zfs_xdvnodeops);
- vp->v_flag |= V_XATTRDIR;
- } else {
- vn_setops(vp, zfs_dvnodeops);
- }
- zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
- break;
- case VBLK:
- case VCHR:
- {
- uint64_t rdev;
- VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
- &rdev, sizeof (rdev)) == 0);
-
- vp->v_rdev = zfs_cmpldev(rdev);
- }
- /*FALLTHROUGH*/
- case VFIFO:
- case VSOCK:
- case VDOOR:
- vn_setops(vp, zfs_fvnodeops);
- break;
- case VREG:
- vp->v_flag |= VMODSORT;
- if (parent == zfsvfs->z_shares_dir) {
- ASSERT(zp->z_uid == 0 && zp->z_gid == 0);
- vn_setops(vp, zfs_sharevnodeops);
- } else {
- vn_setops(vp, zfs_fvnodeops);
- }
- break;
- case VLNK:
- vn_setops(vp, zfs_symvnodeops);
- break;
- default:
- vn_setops(vp, zfs_evnodeops);
- break;
+ /*
+ * xattr znodes hold a reference on their unique parent
+ */
+ if (dip && zp->z_pflags & ZFS_XATTR) {
+ igrab(dip);
+ zp->z_xattr_parent = ITOZ(dip);
}
- mutex_enter(&zfsvfs->z_znodes_lock);
- list_insert_tail(&zfsvfs->z_all_znodes, zp);
- membar_producer();
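+	/*
+	 * Use the object number as the inode number, then populate the
+	 * generic inode and set its Linux ops vectors.
+	 */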
+ ip->i_ino = obj;
+ zfs_inode_update(zp);
+ zfs_inode_set_ops(zsb, ip);
+
/*
- * Everything else must be valid before assigning z_zfsvfs makes the
- * znode eligible for zfs_znode_move().
+ * The only way insert_inode_locked() can fail is if the ip->i_ino
+ * number is already hashed for this super block. This can never
+ * happen because the inode numbers map 1:1 with the object numbers.
+ *
+ * The one exception is rolling back a mounted file system, but in
+	 * this case all the active inodes are unhashed during the rollback.
*/
- zp->z_zfsvfs = zfsvfs;
- mutex_exit(&zfsvfs->z_znodes_lock);
+ VERIFY3S(insert_inode_locked(ip), ==, 0);
- VFS_HOLD(zfsvfs->z_vfs);
+ mutex_enter(&zsb->z_znodes_lock);
+ list_insert_tail(&zsb->z_all_znodes, zp);
+ zsb->z_nr_znodes++;
+ membar_producer();
+ mutex_exit(&zsb->z_znodes_lock);
+
+ unlock_new_inode(ip);
return (zp);
+
+error:
+ unlock_new_inode(ip);
+ iput(ip);
+ return (NULL);
+}
+
+/*
+ * Update the embedded inode given the znode. We should work toward
+ * eliminating this function as soon as possible by removing values
+ * which are duplicated between the znode and inode. If the generic
+ * inode has the correct field it should be used, and the ZFS code
+ * updated to access the inode. This can be done incrementally.
+ */
+void
+zfs_inode_update(znode_t *zp)
+{
+ zfs_sb_t *zsb;
+ struct inode *ip;
+ uint32_t blksize;
+ uint64_t atime[2], mtime[2], ctime[2];
+
+ ASSERT(zp != NULL);
+ zsb = ZTOZSB(zp);
+ ip = ZTOI(zp);
+
+ /* Skip .zfs control nodes which do not exist on disk. */
+ if (zfsctl_is_node(ip))
+ return;
+
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);
+
+ spin_lock(&ip->i_lock);
+ ip->i_generation = zp->z_gen;
+ ip->i_uid = SUID_TO_KUID(zp->z_uid);
+ ip->i_gid = SGID_TO_KGID(zp->z_gid);
+ set_nlink(ip, zp->z_links);
+ ip->i_mode = zp->z_mode;
+ ip->i_blkbits = SPA_MINBLOCKSHIFT;
+ dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize,
+ (u_longlong_t *)&ip->i_blocks);
+
+ ZFS_TIME_DECODE(&ip->i_atime, atime);
+ ZFS_TIME_DECODE(&ip->i_mtime, mtime);
+ ZFS_TIME_DECODE(&ip->i_ctime, ctime);
+
+ i_size_write(ip, zp->z_size);
+ spin_unlock(&ip->i_lock);
}
static uint64_t empty_xattr;
uint64_t mode, size, links, parent, pflags;
uint64_t dzp_pflags = 0;
uint64_t rdev = 0;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
dmu_buf_t *db;
timestruc_t now;
uint64_t gen, obj;
int bonuslen;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
- sa_bulk_attr_t sa_attrs[ZPL_END];
+ sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
- ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
-
- if (zfsvfs->z_replay) {
+ if (zsb->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
gen = dmu_tx_get_txg(tx);
}
- obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
+ obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
- if (vap->va_type == VDIR) {
- if (zfsvfs->z_replay) {
- err = zap_create_claim_norm(zfsvfs->z_os, obj,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ if (S_ISDIR(vap->va_mode)) {
+ if (zsb->z_replay) {
+ err = zap_create_claim_norm(zsb->z_os, obj,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, tx);
- ASSERT3U(err, ==, 0);
+ ASSERT0(err);
} else {
- obj = zap_create_norm(zfsvfs->z_os,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ obj = zap_create_norm(zsb->z_os,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, tx);
}
} else {
- if (zfsvfs->z_replay) {
- err = dmu_object_claim(zfsvfs->z_os, obj,
+ if (zsb->z_replay) {
+ err = dmu_object_claim(zsb->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, tx);
- ASSERT3U(err, ==, 0);
+ ASSERT0(err);
} else {
- obj = dmu_object_alloc(zfsvfs->z_os,
+ obj = dmu_object_alloc(zsb->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, tx);
}
}
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
- VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
+ VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
flag |= IS_XATTR;
}
- if (zfsvfs->z_use_fuids)
+ if (zsb->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
- if (vap->va_type == VDIR) {
+ if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
} else {
size = links = 0;
}
- if (vap->va_type == VBLK || vap->va_type == VCHR) {
- rdev = zfs_expldev(vap->va_rdev);
- }
+ if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
+ rdev = vap->va_rdev;
parent = dzp->z_id;
mode = acl_ids->z_mode;
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
- if (vap->va_mask & AT_ATIME) {
+ if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
- if (vap->va_mask & AT_MTIME) {
+ if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* order for DMU_OT_ZNODE is critical since it needs to be constructed
* in the old znode_phys_t format. Don't change this ordering
*/
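+	/*
+	 * Allocate the bulk attribute array from the heap; an on-stack
+	 * array of ZPL_END entries would be large for the kernel stack.
+	 */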
+ sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_PUSHPAGE);
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
NULL, &atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
NULL, &crtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
NULL, &gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
NULL, &size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
} else {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
NULL, &mode, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
NULL, &size, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
NULL, &gen, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
- &acl_ids->z_fuid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
- &acl_ids->z_fgid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
+ NULL, &acl_ids->z_fuid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
+ NULL, &acl_ids->z_fgid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
NULL, &parent, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
NULL, &pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
NULL, &atime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
NULL, &crtime, 16);
}
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
&empty_xattr, 8);
}
if (obj_type == DMU_OT_ZNODE ||
- (vap->va_type == VBLK || vap->va_type == VCHR)) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
+ (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
NULL, &rdev, 8);
-
}
if (obj_type == DMU_OT_ZNODE) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
NULL, &pflags, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
&acl_ids->z_fuid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
&acl_ids->z_fgid, 8);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
sizeof (uint64_t) * 4);
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
- SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
- *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
- ASSERT(*zpp != NULL);
+ *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
+ ZTOI(dzp));
+ VERIFY(*zpp != NULL);
+ VERIFY(dzp != NULL);
} else {
/*
* If we are creating the root node, the "parent" we
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = mode;
- if (vap->va_mask & AT_XVATTR)
- zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
-
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
- ASSERT3P(err, ==, 0);
+ ASSERT0(err);
}
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
+ kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
/*
- * zfs_xvattr_set only updates the in-core attributes
- * it is assumed the caller will be doing an sa_bulk_update
- * to push the changes out
+ * Update in-core attributes. It is assumed the caller will be doing an
+ * sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
×, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
}
int
-zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
+zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
int err;
sa_handle_t *hdl;
+ struct inode *ip;
*zpp = NULL;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+again:
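+	/*
+	 * Check the Linux inode hash; a NULL result while the SA handle is
+	 * still live means the inode is being evicted (handled below).
+	 */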
+ ip = ilookup(zsb->z_sb, obj_num);
- err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
+
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ iput(ip);
return (err);
}
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
- return (EINVAL);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ iput(ip);
+ return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
- zp = sa_get_userdata(hdl);
+ if (ip == NULL) {
+ /*
+			 * ilookup() returned NULL, which means the znode
+			 * is dying, but the SA handle isn't quite dead yet;
+			 * drop any locks we're holding, reschedule the
+			 * task, and try again.
+ */
+ sa_buf_rele(db, NULL);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+
+ schedule();
+ goto again;
+ }
+ zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
if (zp->z_unlinked) {
- err = ENOENT;
+ err = SET_ERROR(ENOENT);
} else {
- VN_HOLD(ZTOV(zp));
+ igrab(ZTOI(zp));
*zpp = zp;
err = 0;
}
sa_buf_rele(db, NULL);
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ iput(ip);
return (err);
}
+ ASSERT3P(ip, ==, NULL);
+
/*
- * Not found create new znode/vnode
- * but only if file exists.
+	 * Not found; create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* if zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
- zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
- doi.doi_bonus_type, NULL);
+ zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
+ doi.doi_bonus_type, obj_num, NULL, NULL);
if (zp == NULL) {
- err = ENOENT;
+ err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
int count = 0;
uint64_t gen;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
-
mutex_exit(&zp->z_acl_lock);
+
+ rw_enter(&zp->z_xattr_lock, RW_WRITER);
+ if (zp->z_xattr_cached) {
+ nvlist_free(zp->z_xattr_cached);
+ zp->z_xattr_cached = NULL;
+ }
+
+ if (zp->z_xattr_parent) {
+ iput(ZTOI(zp->z_xattr_parent));
+ zp->z_xattr_parent = NULL;
+ }
+ rw_exit(&zp->z_xattr_lock);
+
ASSERT(zp->z_sa_hdl == NULL);
- err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
- return (EINVAL);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ return (SET_ERROR(EINVAL));
}
- zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
+ zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
&gen, sizeof (gen));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
&zp->z_size, sizeof (zp->z_size));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
&zp->z_links, sizeof (zp->z_links));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, sizeof (zp->z_atime));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
&zp->z_uid, sizeof (zp->z_uid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
&zp->z_gid, sizeof (zp->z_gid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&mode, sizeof (mode));
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
- return (EIO);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ return (SET_ERROR(EIO));
}
zp->z_mode = mode;
if (gen != zp->z_gen) {
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
- return (EIO);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ return (SET_ERROR(EIO));
}
zp->z_unlinked = (zp->z_links == 0);
zp->z_blksz = doi.doi_data_block_size;
+ zfs_inode_update(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- objset_t *os = zfsvfs->z_os;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ objset_t *os = zsb->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
- zfs_znode_free(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
void
zfs_zinactive(znode_t *zp)
{
- vnode_t *vp = ZTOV(zp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
+ boolean_t drop_mutex = 0;
ASSERT(zp->z_sa_hdl);
/*
- * Don't allow a zfs_zget() while were trying to release this znode
+	 * Don't allow a zfs_zget() while we're trying to release this znode.
+ *
+ * Linux allows direct memory reclaim which means that any KM_SLEEP
+ * allocation may trigger inode eviction. This can lead to a deadlock
+ * through the ->shrink_icache_memory()->evict()->zfs_inactive()->
+ * zfs_zinactive() call path. To avoid this deadlock the process
+ * must not reacquire the mutex when it is already holding it.
*/
- ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
+ if (!ZFS_OBJ_HOLD_OWNED(zsb, z_id)) {
+ ZFS_OBJ_HOLD_ENTER(zsb, z_id);
+ drop_mutex = 1;
+ }
mutex_enter(&zp->z_lock);
- mutex_enter(&vp->v_lock);
- vp->v_count--;
- if (vp->v_count > 0 || vn_has_cached_data(vp)) {
- /*
- * If the hold count is greater than zero, somebody has
- * obtained a new reference on this znode while we were
- * processing it here, so we are done. If we still have
- * mapped pages then we are also done, since we don't
- * want to inactivate the znode until the pages get pushed.
- *
- * XXX - if vn_has_cached_data(vp) is true, but count == 0,
- * this seems like it would leave the znode hanging with
- * no chance to go inactive...
- */
- mutex_exit(&vp->v_lock);
- mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- return;
- }
- mutex_exit(&vp->v_lock);
/*
* If this was the last reference to a file with no links,
*/
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
+
+ if (drop_mutex)
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
zfs_rmnode(zp);
return;
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- zfs_znode_free(zp);
-}
-
-void
-zfs_znode_free(znode_t *zp)
-{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- vn_invalid(ZTOV(zp));
-
- ASSERT(ZTOV(zp)->v_count == 0);
- mutex_enter(&zfsvfs->z_znodes_lock);
- POINTER_INVALIDATE(&zp->z_zfsvfs);
- list_remove(&zfsvfs->z_all_znodes, zp);
- mutex_exit(&zfsvfs->z_znodes_lock);
-
- if (zp->z_acl_cached) {
- zfs_acl_free(zp->z_acl_cached);
- zp->z_acl_cached = NULL;
- }
-
- kmem_cache_free(znode_cache, zp);
-
- VFS_RELE(zfsvfs->z_vfs);
+ if (drop_mutex)
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
}
void
zp->z_atime_dirty = 1;
}
- if (flag & AT_ATIME) {
+ if (flag & ATTR_ATIME) {
ZFS_TIME_ENCODE(&now, zp->z_atime);
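+		/* Mirror the updated atime into the Linux inode. */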
+ ZTOI(zp)->i_atime.tv_sec = zp->z_atime[0];
+ ZTOI(zp)->i_atime.tv_nsec = zp->z_atime[1];
}
- if (flag & AT_MTIME) {
+ if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
- if (zp->z_zfsvfs->z_use_fuids) {
+ if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
- if (flag & AT_CTIME) {
+ if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
- if (zp->z_zfsvfs->z_use_fuids)
+ if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
- error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
+ error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
- ASSERT3U(error, ==, 0);
+ ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
-/*
- * This is a dummy interface used when pvn_vplist_dirty() should *not*
- * be calling back into the fs for a putpage(). E.g.: when truncating
- * a file, the pages being "thrown away* don't need to be written out.
- */
-/* ARGSUSED */
-static int
-zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
- int flags, cred_t *cr)
-{
- ASSERT(0);
- return (0);
-}
-
/*
* Increase the file length
*
* IN: zp - znode of file to free data in.
* end - new end-of-file
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
uint64_t newblksz;
zfs_range_unlock(rl);
return (0);
}
-top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
- (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
+ (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
- if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
+ if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, SPA_MAXBLOCKSIZE);
} else {
- newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
+ newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
- error = dmu_tx_assign(tx, TXG_NOWAIT);
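+	/*
+	 * TXG_WAIT blocks until the tx can be assigned, so the explicit
+	 * ERESTART retry loop is no longer needed.
+	 */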
+ error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
- if (error == ERESTART) {
- dmu_tx_wait(tx);
- dmu_tx_abort(tx);
- goto top;
- }
dmu_tx_abort(tx);
zfs_range_unlock(rl);
return (error);
zp->z_size = end;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_range_unlock(rl);
* off - start of section to free.
* len - length of section to free.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
rl_t *rl;
int error;
if (off + len > zp->z_size)
len = zp->z_size - off;
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
zfs_range_unlock(rl);
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- vnode_t *vp = ZTOV(zp);
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
int error;
return (0);
}
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
if (error) {
zfs_range_unlock(rl);
return (error);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
}
zp->z_size = end;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
- /*
- * Clear any mapped pages in the truncated region. This has to
- * happen outside of the transaction to avoid the possibility of
- * a deadlock with someone trying to push a page that we are
- * about to invalidate.
- */
- if (vn_has_cached_data(vp)) {
- page_t *pp;
- uint64_t start = end & PAGEMASK;
- int poff = end & PAGEOFFSET;
-
- if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
- /*
- * We need to zero a partial page.
- */
- pagezero(pp, poff, PAGESIZE - poff);
- start += PAGESIZE;
- page_unlock(pp);
- }
- error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
- B_INVAL | B_TRUNC, NULL);
- ASSERT(error == 0);
- }
-
zfs_range_unlock(rl);
return (0);
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
- vnode_t *vp = ZTOV(zp);
+ struct inode *ip = ZTOI(zp);
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog = zfsvfs->z_log;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ zilog_t *zilog = zsb->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
sizeof (mode))) != 0)
return (error);
/*
* Check for any locks in the region to be freed.
*/
-
- if (MANDLOCK(vp, (mode_t)mode)) {
+ if (ip->i_flock && mandatory_lock(ip)) {
uint64_t length = (len ? len : zp->z_size - off);
- if (error = chklock(vp, FWRITE, off, length, flag, NULL))
- return (error);
+ if (!lock_may_write(ip, off, length))
+ return (SET_ERROR(EAGAIN));
}
if (len == 0) {
if (error || !log)
return (error);
log:
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
- error = dmu_tx_assign(tx, TXG_NOWAIT);
+ error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
- if (error == ERESTART) {
- dmu_tx_wait(tx);
- dmu_tx_abort(tx);
- goto log;
- }
dmu_tx_abort(tx);
return (error);
}
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
+ zfs_inode_update(zp);
return (0);
}
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
- zfsvfs_t zfsvfs;
+ struct super_block *sb;
+ zfs_sb_t *zsb;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
int error;
int i;
znode_t *rootzp = NULL;
- vnode_t *vp;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
ASSERT(error == 0);
/*
- * Create root znode. Create minimal znode/vnode/zfsvfs
+ * Create root znode. Create minimal znode/inode/zsb/sb
* to allow zfs_mknode to work.
*/
- vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
- vattr.va_type = VDIR;
+ vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
- rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
- ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
+ rootzp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
rootzp->z_moved = 0;
rootzp->z_unlinked = 0;
rootzp->z_atime_dirty = 0;
rootzp->z_is_sa = USE_SA(version, os);
- vp = ZTOV(rootzp);
- vn_reinit(vp);
- vp->v_type = VDIR;
+ zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_PUSHPAGE | KM_NODEBUG);
+ zsb->z_os = os;
+ zsb->z_parent = zsb;
+ zsb->z_version = version;
+ zsb->z_use_fuids = USE_FUIDS(version, os);
+ zsb->z_use_sa = USE_SA(version, os);
+ zsb->z_norm = norm;
- bzero(&zfsvfs, sizeof (zfsvfs_t));
+ sb = kmem_zalloc(sizeof (struct super_block), KM_PUSHPAGE);
+ sb->s_fs_info = zsb;
- zfsvfs.z_os = os;
- zfsvfs.z_parent = &zfsvfs;
- zfsvfs.z_version = version;
- zfsvfs.z_use_fuids = USE_FUIDS(version, os);
- zfsvfs.z_use_sa = USE_SA(version, os);
- zfsvfs.z_norm = norm;
+ ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
- &zfsvfs.z_attr_table);
+ &zsb->z_attr_table);
ASSERT(error == 0);
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
- zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
+ zsb->z_norm |= U8_TEXTPREP_TOUPPER;
- /* XXX - This must be destroyed but I'm not quite sure yet so
- * I'm just annotating that fact when it's an issue. -Brian */
- mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
- list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
+ mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
+ list_create(&zsb->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
- mutex_init(&zfsvfs.z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
- rootzp->z_zfsvfs = &zfsvfs;
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
- ASSERT(!vn_in_dnlc(ZTOV(rootzp))); /* not valid to move */
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0);
zfs_acl_ids_free(&acl_ids);
- POINTER_INVALIDATE(&rootzp->z_zfsvfs);
- ZTOV(rootzp)->v_count = 0;
+ atomic_set(&ZTOI(rootzp)->i_count, 0);
sa_handle_destroy(rootzp->z_sa_hdl);
kmem_cache_free(znode_cache, rootzp);
/*
* Create shares directory
*/
-
- error = zfs_create_share_dir(&zfsvfs, tx);
-
+ error = zfs_create_share_dir(zsb, tx);
ASSERT(error == 0);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
- mutex_destroy(&zfsvfs.z_hold_mtx[i]);
-}
+ mutex_destroy(&zsb->z_hold_mtx[i]);
+ kmem_free(sb, sizeof (struct super_block));
+ kmem_free(zsb, sizeof (zfs_sb_t));
+}
#endif /* _KERNEL */
static int
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
- dmu_buf_t **db)
+ dmu_buf_t **db, void *tag)
{
dmu_object_info_t doi;
int error;
- if ((error = sa_buf_hold(osp, obj, FTAG, db)) != 0)
+ if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
- sa_buf_rele(*db, FTAG);
- return (ENOTSUP);
+ sa_buf_rele(*db, tag);
+ return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
- sa_buf_rele(*db, FTAG);
+ sa_buf_rele(*db, tag);
return (error);
}
}
void
-zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db)
+zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
sa_handle_destroy(hdl);
- sa_buf_rele(db, FTAG);
+ sa_buf_rele(db, tag);
}
/*
sa_hdl = hdl;
for (;;) {
- uint64_t pobj;
+ uint64_t pobj = 0;
char component[MAXNAMELEN + 2];
size_t complen;
- int is_xattrdir;
+ int is_xattrdir = 0;
if (prevdb)
- zfs_release_sa_handle(prevhdl, prevdb);
+ zfs_release_sa_handle(prevhdl, prevdb, FTAG);
if ((error = zfs_obj_to_pobj(sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
prevhdl = sa_hdl;
prevdb = sa_db;
}
- error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db);
+ error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL);
- zfs_release_sa_handle(sa_hdl, sa_db);
+ zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
if (error != 0)
return (error);
- error = zfs_grab_sa_handle(osp, obj, &hdl, &db);
+ error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
- zfs_release_sa_handle(hdl, db);
+ zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
if (error != 0)
return (error);
- error = zfs_grab_sa_handle(osp, obj, &hdl, &db);
+ error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
- zfs_release_sa_handle(hdl, db);
+ zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
- zfs_release_sa_handle(hdl, db);
+ zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(zfs_create_fs);
+EXPORT_SYMBOL(zfs_obj_to_path);
+#endif