/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/
#include <sys/vnode.h>
#include <sys/kmem_cache.h>
#include <linux/falloc.h>
#include <linux/file_compat.h>
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);
vtype_t
vn_mode_to_vtype(mode_t mode)
{
	if (S_ISREG(mode))
		return (VREG);
	if (S_ISDIR(mode))
		return (VDIR);
	if (S_ISCHR(mode))
		return (VCHR);
	if (S_ISBLK(mode))
		return (VBLK);
	if (S_ISFIFO(mode))
		return (VFIFO);
	if (S_ISLNK(mode))
		return (VLNK);
	if (S_ISSOCK(mode))
		return (VSOCK);

	return (VNON);
} /* vn_mode_to_vtype() */
EXPORT_SYMBOL(vn_mode_to_vtype);
mode_t
vn_vtype_to_mode(vtype_t vtype)
{
	if (vtype == VREG)
		return (S_IFREG);
	if (vtype == VDIR)
		return (S_IFDIR);
	if (vtype == VCHR)
		return (S_IFCHR);
	if (vtype == VBLK)
		return (S_IFBLK);
	if (vtype == VFIFO)
		return (S_IFIFO);
	if (vtype == VLNK)
		return (S_IFLNK);
	if (vtype == VSOCK)
		return (S_IFSOCK);

	return (VNON);
} /* vn_vtype_to_mode() */
EXPORT_SYMBOL(vn_vtype_to_mode);
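
/*
 * Worked example (illustrative, not from the original source): passing
 * S_IFDIR | 0755 to vn_mode_to_vtype() yields VDIR, and feeding VDIR to
 * vn_vtype_to_mode() recovers S_IFDIR. Only the file type survives the
 * round trip; the permission bits are dropped.
 */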
vnode_t *
vn_alloc(int flag)
{
	vnode_t *vp;

	vp = kmem_cache_alloc(vn_cache, flag);
	if (vp != NULL) {
		vp->v_file = NULL;
		vp->v_type = 0;
	}

	return (vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);
void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
    vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/* Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
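	/*
	 * A minimal sketch of that remap (assumption: as in historic SPL
	 * releases, the Solaris-style FREAD/FWRITE flag values are exactly
	 * one greater than the Linux O_RDONLY/O_WRONLY/O_RDWR access modes,
	 * so a single decrement performs the 01->00, 10->01, 11->10
	 * translation described above):
	 */
	flags--;
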
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}
	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
    vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
	char *realpath;
	int len, rc;

	ASSERT(vp == rootdir);

	len = strlen(path) + 2;
	realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
	if (!realpath)
		return (ENOMEM);

	(void)snprintf(realpath, len, "/%s", path);
	rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
	kfree(realpath);

	return (rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
    uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	struct file *fp;
	loff_t offset;
	mm_segment_t saved_fs;
	int rc;

	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);

	fp = vp->v_file;

	offset = off;
	if (ioflag & FAPPEND)
		offset = fp->f_pos;

	/* Writable user data segment must be briefly increased for this
	 * process so we can use the user space read call paths to write
	 * in to memory allocated by the kernel. */
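	/*
	 * Sketch of the segment switch described above, assuming the legacy
	 * get_fs()/set_fs() interface of older kernels (KERNEL_DS makes
	 * kernel addresses acceptable to the "user" copy routines);
	 * saved_fs is restored once the I/O completes below.
	 */
	saved_fs = get_fs();
	set_fs(KERNEL_DS);
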
	if (uio == UIO_WRITE)
		rc = vfs_write(fp, addr, len, &offset);
	else
		rc = vfs_read(fp, addr, len, &offset);

	set_fs(saved_fs);
	fp->f_pos = offset;

	if (rc < 0)
		return (-rc);

	if (residp) {
		*residp = len - rc;
	} else {
		if (rc != len)
			return (EIO);
	}

	return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
/* vn_seek() does not actually seek; it only performs bounds checking on
 * the proposed seek. We perform minimal checking and allow vn_rdwr() to
 * catch anything more serious. */
int
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
} /* vn_seek() */
EXPORT_SYMBOL(vn_seek);
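
/*
 * Example (illustrative): with *noffp == -1 or *noffp == MAXOFFSET_T + 1,
 * vn_seek() returns EINVAL, while any proposed offset in [0, MAXOFFSET_T]
 * returns 0; the current offset 'ooff' is never consulted.
 */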
/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path. If the basename is not "." or "/", it will be an
 * index into the string. While the string should be NULL terminated, the
 * section referring to the basename is not. spl_basename is dual-licensed
 * GPLv2+ and CC0. Anyone wishing to reuse it in another codebase may pick
 * either license.
 */
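/*
 * Worked example (illustrative): for s = "/foo/bar//" the trailing slashes
 * are skipped, *str is left pointing at the "bar" inside s, and *len is 3;
 * note those three bytes are followed by '/' rather than a terminating NULL.
 */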
static void
spl_basename(const char *s, const char **str, int *len)
{
	size_t i, end;

	ASSERT(str);
	ASSERT(len);

	if (!s || !*s) {
		*str = ".";
		*len = 1;
		return;
	}

	i = strlen(s) - 1;

	while (i && s[i--] == '/');

	if (i == 0) {
		*str = "/";
		*len = 1;
		return;
	}

	for (end = i; i; i--) {
		if (s[i] == '/') {
			*str = &s[i + 1];
			*len = end - i + 1;
			return;
		}
	}

	*str = s;
	*len = end + 1;
}
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	/* use I_MUTEX_PARENT because vfs_unlink needs it */
	spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		*path = parent;
	}

	return (dentry);
}
/* Based on do_unlinkat() from linux/fs/namei.c */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
			rc = 0;
			goto slashes;
		}

		inode = dentry->d_inode;
		if (inode)
			atomic_inc(&inode->i_count);

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit:
		dput(dentry);
	} else {
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode); /* truncate the inode here */

	path_put(&parent);
	return (-rc);

slashes:
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
/* Based on do_rename() from linux/fs/namei.c */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;

	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry)) {
		rc = PTR_ERR(old_dentry);
		goto exit;
	}

	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry)) {
		rc = PTR_ERR(new_dentry);
		goto exit2;
	}

	spl_inode_unlock(new_parent.dentry->d_inode);

	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		goto exit3;

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	trap = lock_rename(new_dir, old_dir);

	/* source should not be an ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		goto exit4;

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit4;

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;

	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			goto exit4;
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			goto exit4;
	}

#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	return (-rc);
} /* vn_rename() */
EXPORT_SYMBOL(vn_rename);
int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(vap);

	fp = vp->v_file;

#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_BASIC_STATS,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc)
		return (-rc);

	vap->va_type = vn_mode_to_vtype(stat.mode);
	vap->va_mode = stat.mode;
	vap->va_uid = KUID_TO_SUID(stat.uid);
	vap->va_gid = KGID_TO_SGID(stat.gid);
	vap->va_nodeid = stat.ino;
	vap->va_nlink = stat.nlink;
	vap->va_size = stat.size;
	vap->va_blksize = stat.blksize;
	vap->va_atime = stat.atime;
	vap->va_mtime = stat.mtime;
	vap->va_ctime = stat.ctime;
	vap->va_rdev = stat.rdev;
	vap->va_nblocks = stat.blocks;

	return (0);
} /* vn_getattr() */
EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;
	int error;
	int fstrans;

	ASSERT(vp);
	ASSERT(vp->v_file);

	if (flags & FDSYNC)
		datasync = 1;

	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fsync call and then
	 * reset.
	 */
	fstrans = spl_fstrans_check();
	if (fstrans)
		current->flags &= ~(PF_FSTRANS);

	error = -spl_filp_fsync(vp->v_file, datasync);
	if (fstrans)
		current->flags |= PF_FSTRANS;

	return (error);
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
    offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE
	int fstrans;
#endif

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fallocate() call and
	 * then reset.
	 */
	fstrans = spl_fstrans_check();
	if (fstrans)
		current->flags &= ~(PF_FSTRANS);

	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to punch the hole.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);

	if (fstrans)
		current->flags |= PF_FSTRANS;

	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
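		/*
		 * Worked example (illustrative): with PAGE_SIZE 4096,
		 * l_start 1000 and l_len 9000 give end 10000, which the
		 * code below rounds down to 8192; truncate_range() is
		 * then asked to drop the inclusive range [1000, 8191].
		 */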
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode,
		    bfp->l_start, end - 1);

		return (0);
	}
#endif

	return (error);
} /* vn_space() */
EXPORT_SYMBOL(vn_space);
/* Function must be called while holding the vn_file_lock */
static file_t *
file_find(int fd, struct task_struct *task)
{
	file_t *fp;

	ASSERT(spin_is_locked(&vn_file_lock));

	list_for_each_entry(fp, &vn_file_list, f_list) {
		if (fd == fp->f_fd && fp->f_task == task) {
			ASSERT(atomic_read(&fp->f_ref) != 0);
			return (fp);
		}
	}

	return (NULL);
} /* file_find() */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		lfp = fget(fd);
		fput(fp->f_file);
		/*
		 * areleasef() can cause us to see a stale reference when
		 * userspace has reused a file descriptor before areleasef()
		 * has run. fput() the stale reference and replace it. We
		 * retain the original reference count such that the concurrent
		 * areleasef() will decrement its reference and terminate.
		 */
		if (lfp != fp->f_file) {
			fp->f_file = lfp;
			fp->f_vnode->v_file = lfp;
		}
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}

void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
} /* releasef() */
EXPORT_SYMBOL(releasef);
void
vn_areleasef(int fd, uf_info_t *fip)
{
	file_t *fp;
	struct task_struct *task = (struct task_struct *)fip;

	if (fd < 0)
		return;

	spin_lock(&vn_file_lock);
	fp = file_find(fd, task);
	if (fp) {
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			spin_unlock(&vn_file_lock);
			return;
		}

		list_del(&fp->f_list);
		spin_unlock(&vn_file_lock);
		releasef_locked(fp);
	} else {
		spin_unlock(&vn_file_lock);
	}
} /* areleasef() */
EXPORT_SYMBOL(areleasef);
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	if (old_pwd.dentry)
		path_put(&old_pwd);
}
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
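	/*
	 * Sketch of the data segment expansion described above, assuming
	 * the legacy get_fs()/set_fs() interface; saved_fs is restored on
	 * the 'out' path below.
	 */
	saved_fs = get_fs();
	set_fs(KERNEL_DS);
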
	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */

static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* vn_file_cache_constructor() */

static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
int
spl_vn_init(void)
{
	vn_cache = kmem_cache_create("spl_vn_cache",
	    sizeof (struct vnode), 64,
	    vn_cache_constructor,
	    vn_cache_destructor,
	    NULL, NULL, NULL, 0);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
	    sizeof (file_t), 64,
	    vn_file_cache_constructor,
	    vn_file_cache_destructor,
	    NULL, NULL, NULL, 0);

	return (0);
} /* spl_vn_init() */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);
} /* spl_vn_fini() */