static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;
-static spinlock_t vn_file_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);
-static vtype_t
-vn_get_sol_type(umode_t mode)
+vtype_t
+vn_mode_to_vtype(mode_t mode)
{
if (S_ISREG(mode))
return VREG;
return VCHR;
return VNON;
-} /* vn_get_sol_type() */
+} /* vn_mode_to_vtype() */
+EXPORT_SYMBOL(vn_mode_to_vtype);
+
+/*
+ * Map a Solaris vtype_t to the equivalent Linux S_IF* file-type mode bits.
+ * Inverse of vn_mode_to_vtype() for the types handled below.
+ *
+ * NOTE(review): the fallback returns the vtype_t constant VNON as a mode_t;
+ * this presumably relies on VNON being 0 (no type bits set) -- confirm
+ * callers treat a 0 return as "unknown/untyped".
+ */
+mode_t
+vn_vtype_to_mode(vtype_t vtype)
+{
+ if (vtype == VREG)
+ return S_IFREG;
+
+ if (vtype == VDIR)
+ return S_IFDIR;
+
+ if (vtype == VCHR)
+ return S_IFCHR;
+
+ if (vtype == VBLK)
+ return S_IFBLK;
+
+ if (vtype == VFIFO)
+ return S_IFIFO;
+
+ if (vtype == VLNK)
+ return S_IFLNK;
+
+ if (vtype == VSOCK)
+ return S_IFSOCK;
+
+ return VNON;
+} /* vn_vtype_to_mode() */
+EXPORT_SYMBOL(vn_vtype_to_mode);
vnode_t *
vn_alloc(int flag)
mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));
mutex_enter(&vp->v_lock);
- vp->v_type = vn_get_sol_type(stat.mode);
+ vp->v_type = vn_mode_to_vtype(stat.mode);
vp->v_file = fp;
vp->v_gfp_mask = saved_gfp;
*vpp = vp;
* proposed seek. We perform minimal checking and allow vn_rdwr() to catch
* anything more serious. */
int
-vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
+vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
{
+ /* Only bounds-check the proposed offset: anything in [0, MAXOFFSET_T]
+ * is accepted here; deeper validation is deferred to vn_rdwr(). */
return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
ASSERT(seg == UIO_SYSSPACE);
ASSERT(flags == RMFILE);
- rc = path_lookup(path, LOOKUP_PARENT, &nd);
+ rc = spl_kern_path_parent(path, &nd);
if (rc)
SGOTO(exit, rc);
if (nd.last_type != LAST_NORM)
SGOTO(exit1, rc);
-#ifdef HAVE_INODE_I_MUTEX
- mutex_lock_nested(&nd.nd_dentry->d_inode->i_mutex, I_MUTEX_PARENT);
-#else
- down(&nd.nd_dentry->d_inode->i_sem);
-#endif /* HAVE_INODE_I_MUTEX */
+ spl_inode_lock_nested(nd.nd_dentry->d_inode, I_MUTEX_PARENT);
dentry = vn_lookup_hash(&nd);
rc = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
exit2:
dput(dentry);
}
-#ifdef HAVE_INODE_I_MUTEX
- mutex_unlock(&nd.nd_dentry->d_inode->i_mutex);
-#else
- up(&nd.nd_dentry->d_inode->i_sem);
-#endif /* HAVE_INODE_I_MUTEX */
+
+ spl_inode_unlock(nd.nd_dentry->d_inode);
if (inode)
iput(inode); /* truncate the inode here */
exit1:
int rc = 0;
SENTRY;
- rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
+ rc = spl_kern_path_parent(oldname, &oldnd);
if (rc)
SGOTO(exit, rc);
- rc = path_lookup(newname, LOOKUP_PARENT, &newnd);
+ rc = spl_kern_path_parent(newname, &newnd);
if (rc)
SGOTO(exit1, rc);
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
struct file *fp;
- struct kstat stat;
+ struct kstat stat;
int rc;
SENTRY;
if (rc)
SRETURN(-rc);
- vap->va_type = vn_get_sol_type(stat.mode);
+ vap->va_type = vn_mode_to_vtype(stat.mode);
vap->va_mode = stat.mode;
vap->va_uid = stat.uid;
vap->va_gid = stat.gid;
vap->va_nodeid = stat.ino;
vap->va_nlink = stat.nlink;
vap->va_size = stat.size;
- vap->va_blocksize = stat.blksize;
- vap->va_atime.tv_sec = stat.atime.tv_sec;
- vap->va_atime.tv_usec = stat.atime.tv_nsec / NSEC_PER_USEC;
- vap->va_mtime.tv_sec = stat.mtime.tv_sec;
- vap->va_mtime.tv_usec = stat.mtime.tv_nsec / NSEC_PER_USEC;
- vap->va_ctime.tv_sec = stat.ctime.tv_sec;
- vap->va_ctime.tv_usec = stat.ctime.tv_nsec / NSEC_PER_USEC;
+ vap->va_blksize = stat.blksize;
+ vap->va_atime = stat.atime;
+ vap->va_mtime = stat.mtime;
+ vap->va_ctime = stat.ctime;
vap->va_rdev = stat.rdev;
- vap->va_blocks = stat.blocks;
+ vap->va_nblocks = stat.blocks;
- SRETURN(0);
+ SRETURN(0);
}
EXPORT_SYMBOL(vn_getattr);
ASSERT(spin_is_locked(&vn_file_lock));
list_for_each_entry(fp, &vn_file_list, f_list) {
- if (fd == fp->f_fd) {
+ if (fd == fp->f_fd && fp->f_task == current) {
ASSERT(atomic_read(&fp->f_ref) != 0);
return fp;
}
mutex_enter(&fp->f_lock);
fp->f_fd = fd;
+ fp->f_task = current;
fp->f_offset = 0;
atomic_inc(&fp->f_ref);
SGOTO(out_vnode, rc);
mutex_enter(&vp->v_lock);
- vp->v_type = vn_get_sol_type(stat.mode);
+ vp->v_type = vn_mode_to_vtype(stat.mode);
vp->v_file = lfp;
mutex_exit(&vp->v_lock);
sizeof(struct vnode), 64,
vn_cache_constructor,
vn_cache_destructor,
- NULL, NULL, NULL, 0);
+ NULL, NULL, NULL, KMC_KMEM);
vn_file_cache = kmem_cache_create("spl_vn_file_cache",
sizeof(file_t), 64,
vn_file_cache_constructor,
vn_file_cache_destructor,
- NULL, NULL, NULL, 0);
+ NULL, NULL, NULL, KMC_KMEM);
SRETURN(0);
} /* vn_init() */