/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/
#include <sys/vnode.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_VNODE
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

static spinlock_t vn_file_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(vn_file_list);
vtype_t
vn_get_sol_type(umode_t mode)
{
	/* ... */
} /* vn_get_sol_type() */
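
/*
 * The body of vn_get_sol_type() is elided above.  A minimal sketch of the
 * expected mapping, assuming the usual S_IS*() mode macros and the Solaris
 * vtype_t values, would be:
 */
#if 0
	if (S_ISREG(mode))
		return VREG;
	if (S_ISDIR(mode))
		return VDIR;
	if (S_ISCHR(mode))
		return VCHR;
	if (S_ISBLK(mode))
		return VBLK;
	if (S_ISFIFO(mode))
		return VFIFO;
	if (S_ISLNK(mode))
		return VLNK;
	if (S_ISSOCK(mode))
		return VSOCK;

	return VNON;
#endif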
	vp = kmem_cache_alloc(vn_cache, flag);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	gfp_t saved_gfp;
	vnode_t *vp;
	int rc, saved_umask = 0;
	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);

	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;
	/* Note that for filp_open() the two low bits must be remapped from
	 * the Solaris to the Linux open-mode encoding:
	 *   01 - read-only  -> 00 read-only
	 *   10 - write-only -> 01 write-only
	 *   11 - read-write -> 10 read-write
	 * With FREAD = 0x01 and FWRITE = 0x02 this is simply a decrement.
	 */
	flags--;

	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);
	fp = filp_open(path, flags, mode);

	/* Restore the umask if it was cleared above */
	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		SRETURN(-PTR_ERR(fp));
	rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
	if (rc) {
		filp_close(fp, 0);
		SRETURN(-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		SRETURN(-ENOMEM);
	}
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));
	mutex_enter(&vp->v_lock);
	vp->v_type = vn_get_sol_type(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	SRETURN(rc);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
	  vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
	char *realpath;
	int len, rc;

	ASSERT(vp == rootdir);
	len = strlen(path) + 2;
	realpath = kmalloc(len, GFP_KERNEL);
	if (!realpath)
		SRETURN(-ENOMEM);

	(void)snprintf(realpath, len, "/%s", path);
	rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
	kfree(realpath);

	SRETURN(rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
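
/*
 * Illustrative only: since vn_openat() asserts vp == rootdir and simply
 * prefixes "/" to the supplied path, a hypothetical call such as
 *
 *	vn_openat("etc/hostid", UIO_SYSSPACE, FREAD, 0, &vp, 0, NULL,
 *	    rootdir, -1);
 *
 * is equivalent to vn_open("/etc/hostid", ...).
 */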
int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
	uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	loff_t offset;
	mm_segment_t saved_fs;
	struct file *fp;
	int rc;
	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);
	ASSERT(x2 == RLIM64_INFINITY);
	fp = vp->v_file;
	offset = off;

	if (ioflag & FAPPEND) /* Append to the end of the file */
		offset = fp->f_pos;

	/* The writable user data segment must be briefly increased for this
	 * process so we can use the user space read call paths to write
	 * into memory allocated by the kernel. */
	saved_fs = get_fs();
	set_fs(get_ds());
	if (uio & UIO_WRITE)
		rc = vfs_write(fp, addr, len, &offset);
	else
		rc = vfs_read(fp, addr, len, &offset);

	set_fs(saved_fs);

	if (rc < 0)
		SRETURN(-rc);

	if (residp)
		*residp = len - rc;
	else if (rc != len)
		SRETURN(EIO);

	SRETURN(0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
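
/*
 * Illustrative only: a hypothetical caller reading a file through the
 * emulated vnode interface above.  The helper name and error handling are
 * examples, not part of the SPL API; errors come back as positive errnos.
 */
#if 0
static int
example_read_file(const char *path, void *buf, ssize_t len)
{
	vnode_t *vp;
	int rc;

	rc = vn_open(path, UIO_SYSSPACE, FREAD, 0644, &vp, 0, NULL);
	if (rc)
		return rc;

	rc = vn_rdwr(UIO_READ, vp, buf, len, 0, UIO_SYSSPACE,
	    0, RLIM64_INFINITY, NULL, NULL);
	(void)vn_close(vp, FREAD, 0, 0, NULL, NULL);

	return rc;
}
#endif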
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;
	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	SRETURN(-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
/* vn_seek() does not actually seek; it only performs bounds checking on
 * the proposed seek.  We perform minimal checking and allow vn_rdwr() to
 * catch anything more serious. */
int
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
EXPORT_SYMBOL(vn_seek);
static struct dentry *
vn_lookup_hash(struct nameidata *nd)
{
	return lookup_one_len((const char *)nd->last.name,
			      nd->nd_dentry, nd->last.len);
} /* vn_lookup_hash() */
static void
vn_path_release(struct nameidata *nd)
{
	dput(nd->nd_dentry);
	mntput(nd->nd_mnt);
} /* vn_path_release() */
/* Modified do_unlinkat() from linux/fs/namei.c; only uses exported symbols. */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct nameidata nd;
	struct inode *inode = NULL;
	int rc = 0;
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);
	rc = path_lookup(path, LOOKUP_PARENT, &nd);
	if (rc)
		SGOTO(exit, rc);

	rc = -EISDIR;
	if (nd.last_type != LAST_NORM)
		SGOTO(exit1, rc);
	spl_inode_lock_nested(nd.nd_dentry->d_inode, I_MUTEX_PARENT);
	dentry = vn_lookup_hash(&nd);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want the correct rc value */
		if (nd.last.name[nd.last.len])
			SGOTO(slashes, rc);
		inode = dentry->d_inode;
		if (inode)
			atomic_inc(&inode->i_count);
#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(nd.nd_dentry->d_inode, dentry);
#else
		rc = vfs_unlink(nd.nd_dentry->d_inode, dentry, nd.nd_mnt);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit2:
		dput(dentry);
	}
	spl_inode_unlock(nd.nd_dentry->d_inode);
	if (inode)
		iput(inode);	/* truncate the inode here */
exit1:
	vn_path_release(&nd);
exit:
	SRETURN(-rc);
slashes:
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	SGOTO(exit2, rc);
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
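
/*
 * Illustrative only: removing a file by absolute path, e.g.
 *
 *	rc = vn_remove("/tmp/spl-example", UIO_SYSSPACE, RMFILE);
 *
 * The seg and flags arguments are asserted above to be UIO_SYSSPACE and
 * RMFILE, so no other values are supported.
 */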
/* Modified do_rename() from linux/fs/namei.c; only uses exported symbols. */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct nameidata oldnd, newnd;
	int rc = 0;
	rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
	if (rc)
		SGOTO(exit, rc);

	rc = path_lookup(newname, LOOKUP_PARENT, &newnd);
	if (rc)
		SGOTO(exit1, rc);

	rc = -EXDEV;
	if (oldnd.nd_mnt != newnd.nd_mnt)
		SGOTO(exit2, rc);
	old_dir = oldnd.nd_dentry;
	rc = -EBUSY;
	if (oldnd.last_type != LAST_NORM)
		SGOTO(exit2, rc);

	new_dir = newnd.nd_dentry;
	if (newnd.last_type != LAST_NORM)
		SGOTO(exit2, rc);

	trap = lock_rename(new_dir, old_dir);
	old_dentry = vn_lookup_hash(&oldnd);
	rc = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		SGOTO(exit3, rc);

	/* The source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		SGOTO(exit4, rc);
	/* Unless the source is a directory, trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (oldnd.last.name[oldnd.last.len])
			SGOTO(exit4, rc);
		if (newnd.last.name[newnd.last.len])
			SGOTO(exit4, rc);
	}
	/* The source should not be an ancestor of the target */
	rc = -EINVAL;
	if (old_dentry == trap)
		SGOTO(exit4, rc);

	new_dentry = vn_lookup_hash(&newnd);
	rc = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		SGOTO(exit4, rc);

	/* The target should not be an ancestor of the source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		SGOTO(exit5, rc);
#ifdef HAVE_4ARGS_VFS_RENAME
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry, oldnd.nd_mnt,
	    new_dir->d_inode, new_dentry, newnd.nd_mnt);
#endif /* HAVE_4ARGS_VFS_RENAME */
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_dir, old_dir);
exit2:
	vn_path_release(&newnd);
exit1:
	vn_path_release(&oldnd);
exit:
	SRETURN(-rc);
} /* vn_rename() */
EXPORT_SYMBOL(vn_rename);
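
/*
 * Illustrative only: both paths must live on the same mount, e.g.
 *
 *	rc = vn_rename("/tmp/spl-old", "/tmp/spl-new", 0);
 */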
int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	fp = vp->v_file;

	rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
	if (rc)
		SRETURN(-rc);
	vap->va_type		= vn_get_sol_type(stat.mode);
	vap->va_mode		= stat.mode;
	vap->va_uid		= stat.uid;
	vap->va_gid		= stat.gid;
	vap->va_nodeid		= stat.ino;
	vap->va_nlink		= stat.nlink;
	vap->va_size		= stat.size;
	vap->va_blocksize	= stat.blksize;
	vap->va_atime.tv_sec	= stat.atime.tv_sec;
	vap->va_atime.tv_usec	= stat.atime.tv_nsec / NSEC_PER_USEC;
	vap->va_mtime.tv_sec	= stat.mtime.tv_sec;
	vap->va_mtime.tv_usec	= stat.mtime.tv_nsec / NSEC_PER_USEC;
	vap->va_ctime.tv_sec	= stat.ctime.tv_sec;
	vap->va_ctime.tv_usec	= stat.ctime.tv_nsec / NSEC_PER_USEC;
	vap->va_rdev		= stat.rdev;
	vap->va_blocks		= stat.blocks;

	SRETURN(0);
} /* vn_getattr() */
EXPORT_SYMBOL(vn_getattr);
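
/*
 * Illustrative only: fetching the size of an open vnode, e.g.
 *
 *	vattr_t vap;
 *
 *	rc = vn_getattr(vp, &vap, 0, NULL, NULL);
 *	if (rc == 0)
 *		size = vap.va_size;
 */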
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;

	if (flags & FDSYNC)
		datasync = 1;

	SRETURN(-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
/* This function must be called while holding the vn_file_lock */
static file_t *
file_find(int fd)
{
	file_t *fp;

	ASSERT(spin_is_locked(&vn_file_lock));

	list_for_each_entry(fp, &vn_file_list, f_list) {
		if (fd == fp->f_fd) {
			ASSERT(atomic_read(&fp->f_ref) != 0);
			return fp;
		}
	}

	return NULL;
} /* file_find() */
file_t *
getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	/* Already open? Just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd);
	if (fp) {
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		SRETURN(fp);
	}

	spin_unlock(&vn_file_lock);
	/* The file was not yet opened; create the object and set it up */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		SGOTO(out, rc);

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		SGOTO(out_mutex, rc);
	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		SGOTO(out_fget, rc);

	if (vfs_getattr(lfp->f_vfsmnt, lfp->f_dentry, &stat))
		SGOTO(out_vnode, rc);
	mutex_enter(&vp->v_lock);
	vp->v_type = vn_get_sol_type(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;
	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	SRETURN(fp);
out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	SRETURN(NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from the list, no refs, safe to free outside the mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
} /* releasef_locked() */
void
releasef(int fd)
{
	file_t *fp;

	spin_lock(&vn_file_lock);
	fp = file_find(fd);
	if (fp == NULL) {
		spin_unlock(&vn_file_lock);
		return;
	}

	atomic_dec(&fp->f_ref);
	if (atomic_read(&fp->f_ref) > 0) {
		spin_unlock(&vn_file_lock);
		return;
	}

	list_del(&fp->f_list);
	spin_unlock(&vn_file_lock);

	releasef_locked(fp);
} /* releasef() */
EXPORT_SYMBOL(releasef);
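
/*
 * Illustrative only: a hypothetical bridge from a user file descriptor to
 * the emulated file_t, mirroring the Solaris getf()/releasef() pattern.
 */
#if 0
	file_t *fp;

	fp = getf(fd);
	if (fp == NULL)
		return -EBADF;

	/* ... use fp->f_vnode with vn_rdwr(), vn_getattr(), etc. ... */

	releasef(fd);
#endif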
#ifndef HAVE_SET_FS_PWD
# ifdef HAVE_2ARGS_SET_FS_PWD
/* Used from 2.6.25 - 2.6.31+ */
void
set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

# ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
# else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
# endif /* HAVE_FS_STRUCT_SPINLOCK */

	if (old_pwd.dentry)
		path_put(&old_pwd);
}
# else
/* Used from 2.6.11 - 2.6.24 */
void
set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt, struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}
# endif /* HAVE_2ARGS_SET_FS_PWD */
#endif /* HAVE_SET_FS_PWD */
int
vn_set_pwd(const char *filename)
{
#if defined(HAVE_2ARGS_SET_FS_PWD) && defined(HAVE_USER_PATH_DIR)
	struct path path;
#else
	struct nameidata nd;
#endif /* HAVE_2ARGS_SET_FS_PWD */
	mm_segment_t saved_fs;
	int rc;
	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address, so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());
#ifdef HAVE_2ARGS_SET_FS_PWD
# ifdef HAVE_USER_PATH_DIR
	rc = user_path_dir(filename, &path);
	if (rc)
		SGOTO(out, rc);

	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		SGOTO(dput_and_out, rc);

	set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
# else
	rc = __user_walk(filename,
	    LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
	if (rc)
		SGOTO(out, rc);

	rc = vfs_permission(&nd, MAY_EXEC);
	if (rc)
		SGOTO(dput_and_out, rc);

	set_fs_pwd(current->fs, &nd.path);

dput_and_out:
	path_put(&nd.path);
# endif /* HAVE_USER_PATH_DIR */
#else
	rc = __user_walk(filename,
	    LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
	if (rc)
		SGOTO(out, rc);

	rc = vfs_permission(&nd, MAY_EXEC);
	if (rc)
		SGOTO(dput_and_out, rc);

	set_fs_pwd(current->fs, nd.nd_mnt, nd.nd_dentry);

dput_and_out:
	vn_path_release(&nd);
#endif /* HAVE_2ARGS_SET_FS_PWD */
out:
	set_fs(saved_fs);

	SRETURN(-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
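
/*
 * Illustrative only: giving kernel threads a well-defined working
 * directory, e.g. a hypothetical call during module initialization:
 *
 *	(void)vn_set_pwd("/");
 */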
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* vn_file_cache_constructor() */
static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
int
spl_vn_init(void)
{
	vn_cache = kmem_cache_create("spl_vn_cache",
				     sizeof(struct vnode), 64,
				     vn_cache_constructor,
				     vn_cache_destructor,
				     NULL, NULL, NULL, 0);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
					  sizeof(file_t), 64,
					  vn_file_cache_constructor,
					  vn_file_cache_destructor,
					  NULL, NULL, NULL, 0);

	SRETURN(0);
} /* spl_vn_init() */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	kmem_cache_destroy(vn_file_cache);
	vn_file_cache = NULL;
	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		SWARN("Warning: %d files leaked\n", leaked);

	kmem_cache_destroy(vn_cache);
} /* spl_vn_fini() */