if (S_ISSOCK(mode))
return VSOCK;
- if (S_ISCHR(mode))
- return VCHR;
-
return VNON;
} /* vn_mode_to_vtype() */
EXPORT_SYMBOL(vn_mode_to_vtype);
ASSERT(vp == rootdir);
len = strlen(path) + 2;
- realpath = kmalloc(len, GFP_KERNEL);
+ realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
if (!realpath)
return (ENOMEM);
ASSERT(vp->v_file);
ASSERT(seg == UIO_SYSSPACE);
ASSERT((ioflag & ~FAPPEND) == 0);
- ASSERT(x2 == RLIM64_INFINITY);
fp = vp->v_file;
if (rc)
return (ERR_PTR(rc));
- spl_inode_lock(parent.dentry->d_inode);
+ /* use I_MUTEX_PARENT because vfs_unlink needs it */
+ spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);
dentry = lookup_one_len(basename, parent.dentry, len);
if (IS_ERR(dentry)) {
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
int datasync = 0;
+ int error;
+ int fstrans;
ASSERT(vp);
ASSERT(vp->v_file);
if (flags & FDSYNC)
datasync = 1;
- return (-spl_filp_fsync(vp->v_file, datasync));
+ /*
+ * May enter XFS which generates a warning when PF_FSTRANS is set.
+ * To avoid this the flag is cleared over spl_filp_fsync() and then reset.
+ */
+ fstrans = spl_fstrans_check();
+ if (fstrans)
+ current->flags &= ~(PF_FSTRANS);
+
+ error = -spl_filp_fsync(vp->v_file, datasync);
+ if (fstrans)
+ current->flags |= PF_FSTRANS;
+
+ return (error);
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
offset_t offset, void *x6, void *x7)
{
int error = EOPNOTSUPP;
+#ifdef FALLOC_FL_PUNCH_HOLE
+ int fstrans;
+#endif
if (cmd != F_FREESP || bfp->l_whence != 0)
return (EOPNOTSUPP);
ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);
#ifdef FALLOC_FL_PUNCH_HOLE
+ /*
+ * May enter XFS which generates a warning when PF_FSTRANS is set.
+ * To avoid this the flag is cleared over spl_filp_fallocate() and then reset.
+ */
+ fstrans = spl_fstrans_check();
+ if (fstrans)
+ current->flags &= ~(PF_FSTRANS);
+
/*
* When supported by the underlying file system preferentially
* use the fallocate() callback to preallocate the space.
error = -spl_filp_fallocate(vp->v_file,
FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
bfp->l_start, bfp->l_len);
+
+ if (fstrans)
+ current->flags |= PF_FSTRANS;
+
if (error == 0)
return (0);
#endif
/* Function must be called while holding the vn_file_lock */
static file_t *
-file_find(int fd)
+file_find(int fd, struct task_struct *task)
{
file_t *fp;
ASSERT(spin_is_locked(&vn_file_lock));
list_for_each_entry(fp, &vn_file_list, f_list) {
- if (fd == fp->f_fd && fp->f_task == current) {
+ if (fd == fp->f_fd && fp->f_task == task) {
ASSERT(atomic_read(&fp->f_ref) != 0);
return fp;
}
vnode_t *vp;
int rc = 0;
+ if (fd < 0)
+ return (NULL);
+
/* Already open just take an extra reference */
spin_lock(&vn_file_lock);
- fp = file_find(fd);
+ fp = file_find(fd, current);
if (fp) {
+ lfp = fget(fd);
+ fput(fp->f_file);
+ /*
+ * areleasef() can cause us to see a stale reference when
+ * userspace has reused a file descriptor before areleasef()
+ * has run. fput() the stale reference and replace it. We
+ * retain the original reference count such that the concurrent
+ * areleasef() will decrement its reference and terminate.
+ */
+ if (lfp != fp->f_file) {
+ fp->f_file = lfp;
+ fp->f_vnode->v_file = lfp;
+ }
atomic_inc(&fp->f_ref);
spin_unlock(&vn_file_lock);
return (fp);
void
vn_releasef(int fd)
+{
+ areleasef(fd, P_FINFO(current));
+}
+EXPORT_SYMBOL(releasef);
+
+void
+vn_areleasef(int fd, uf_info_t *fip)
{
file_t *fp;
+ struct task_struct *task = (struct task_struct *)fip;
+
+ if (fd < 0)
+ return;
spin_lock(&vn_file_lock);
- fp = file_find(fd);
+ fp = file_find(fd, task);
if (fp) {
atomic_dec(&fp->f_ref);
if (atomic_read(&fp->f_ref) > 0) {
return;
} /* releasef() */
-EXPORT_SYMBOL(releasef);
+EXPORT_SYMBOL(areleasef);
+
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
sizeof(struct vnode), 64,
vn_cache_constructor,
vn_cache_destructor,
- NULL, NULL, NULL, KMC_KMEM);
+ NULL, NULL, NULL, 0);
vn_file_cache = kmem_cache_create("spl_vn_file_cache",
sizeof(file_t), 64,
vn_file_cache_constructor,
vn_file_cache_destructor,
- NULL, NULL, NULL, KMC_KMEM);
+ NULL, NULL, NULL, 0);
return (0);
} /* vn_init() */