#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
#define xfs_stack_trace() dump_stack()
#define xfs_itruncate_data(ip, off) \
- (-vmtruncate(vn_to_inode(VFS_I(ip)), (off)))
+ (-vmtruncate(VFS_I(ip), (off)))
/* Move the kernel do_div definition off to one side */
STATIC_INLINE void
xfs_revalidate_inode(
xfs_mount_t *mp,
- bhv_vnode_t *vp,
+ struct inode *inode,
xfs_inode_t *ip)
{
- struct inode *inode = vn_to_inode(vp);
inode->i_mode = ip->i_d.di_mode;
inode->i_nlink = ip->i_d.di_nlink;
void
xfs_initialize_vnode(
struct xfs_mount *mp,
- bhv_vnode_t *vp,
+ struct inode *inode,
struct xfs_inode *ip)
{
- struct inode *inode = vn_to_inode(vp);
if (!ip->i_vnode) {
- ip->i_vnode = vp;
+ ip->i_vnode = inode;
inode->i_private = ip;
}
* finish our work.
*/
if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
- xfs_revalidate_inode(mp, vp, ip);
+ xfs_revalidate_inode(mp, inode, ip);
xfs_set_inodeops(inode);
xfs_iflags_clear(ip, XFS_INEW);
vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
if (unlikely(!vp))
return NULL;
- return vn_to_inode(vp);
+ return vp;
}
STATIC void
xfs_fs_inode_init_once(
void *vnode)
{
- inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+ inode_init_once((struct inode *)vnode);
}
/*
XFS_STATS_INC(vn_hold);
- inode = igrab(vn_to_inode(vp));
+ inode = igrab(vp);
ASSERT(inode);
return vp;
typedef struct inode bhv_vnode_t;
-/*
- * Vnode to Linux inode mapping.
- */
-static inline struct inode *vn_to_inode(bhv_vnode_t *vnode)
-{
- return vnode;
-}
-
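
Since bhv_vnode_t is now only a typedef for struct inode, the vn_to_inode() helper removed above is a pure identity mapping, which is why every call site in the hunks above can simply pass the pointer straight through. A minimal, self-contained userspace sketch of the same pattern, using hypothetical names rather than the kernel types:

#include <assert.h>
#include <stdio.h>

struct fake_inode {
	unsigned long	i_ino;
};

/* Alias playing the role of bhv_vnode_t. */
typedef struct fake_inode fake_vnode_t;

/* Identity helper playing the role of the removed vn_to_inode(). */
static inline struct fake_inode *fake_vn_to_inode(fake_vnode_t *vnode)
{
	return vnode;
}

int main(void)
{
	struct fake_inode	inode = { .i_ino = 42 };
	fake_vnode_t		*vp = &inode;

	/* The wrapper returns its argument unchanged, so callers can drop it. */
	assert(fake_vn_to_inode(vp) == vp);
	printf("ino %lu\n", vp->i_ino);
	return 0;
}
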
/*
* Return values for xfs_inactive. A return value of
* VN_INACTIVE_NOCACHE implies that the file system behavior
static inline int vn_count(bhv_vnode_t *vp)
{
- return atomic_read(&vn_to_inode(vp)->i_count);
+ return atomic_read(&vp->i_count);
}
/*
xfs_itrace_hold(XFS_I(vp), __FILE__, __LINE__, (inst_t *)__return_address))
#define VN_RELE(vp) \
(xfs_itrace_rele(XFS_I(vp), __FILE__, __LINE__, (inst_t *)__return_address), \
- iput(vn_to_inode(vp)))
+ iput(vp))
#else
#define VN_HOLD(vp) ((void)vn_hold(vp))
-#define VN_RELE(vp) (iput(vn_to_inode(vp)))
+#define VN_RELE(vp) (iput(vp))
#endif
static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp)
{
- return igrab(vn_to_inode(vp));
+ return igrab(vp);
}
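
With the identity wrapper gone, the remaining vnode helpers map directly onto the generic inode reference counting visible above: vn_hold()/VN_HOLD() take an extra reference via igrab() and assert that it succeeded, VN_RELE() drops it with plain iput(), and vn_grab() hands back NULL when igrab() fails because the inode is already being torn down. A hedged, caller-side sketch (hypothetical function, not part of this patch):

/* Hypothetical example: pin an inode that may already be on its way out,
 * do some work against it, then drop the extra reference. */
STATIC void
xfs_example_with_extra_ref(
	bhv_vnode_t	*vp)
{
	bhv_vnode_t	*held;

	held = vn_grab(vp);	/* igrab(): NULL if the inode is being freed */
	if (!held)
		return;

	/* ... operate on the inode while the extra reference pins it ... */

	VN_RELE(held);		/* now simply iput() */
}
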
/*
*/
static inline int VN_BAD(bhv_vnode_t *vp)
{
- return is_bad_inode(vn_to_inode(vp));
+ return is_bad_inode(vp);
}
/*
/*
* Some useful predicates.
*/
-#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping)
-#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages)
-#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \
+#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping)
+#define VN_CACHED(vp) (vp->i_mapping->nrpages)
+#define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \
PAGECACHE_TAG_DIRTY)
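
The predicate macros above likewise read the generic address_space state directly: VN_CACHED() is the number of pagecache pages attached to the inode, VN_DIRTY() tests the pagecache dirty tag, and VN_MAPPED() reports whether the mapping is mmap()ed anywhere. A hedged sketch of how a caller might combine them (hypothetical helper, not from this patch):

/* Hypothetical example: decide whether an inode's pagecache needs flushing. */
STATIC int
xfs_example_needs_flush(
	bhv_vnode_t	*vp)
{
	if (!VN_CACHED(vp))		/* nothing cached, nothing to write */
		return 0;
	return VN_DIRTY(vp) || VN_MAPPED(vp);
}
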
return XFS_ERROR(EIO);
/* capture size updates in I/O completion before writing the inode. */
- error = filemap_fdatawait(vn_to_inode(VFS_I(ip))->i_mapping);
+ error = filemap_fdatawait(VFS_I(ip)->i_mapping);
if (error)
return XFS_ERROR(error);
XFS_MOUNT_ILOCK(mp);
spin_lock(&ip->i_flags_lock);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
- vn_to_inode(vp)->i_private = NULL;
+ vp->i_private = NULL;
ip->i_vnode = NULL;
spin_unlock(&ip->i_flags_lock);
list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);