]> git.proxmox.com Git - mirror_spl.git/blame - module/spl/spl-vnode.c
Use kernel slab for vn_cache and vn_file_cache
[mirror_spl.git] / module / spl / spl-vnode.c
CommitLineData
716154c5
BB
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
716154c5
BB
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Vnode Implementation.
25\*****************************************************************************/
715f6251 26
f7fd6ddd 27#include <sys/cred.h>
4b171585 28#include <sys/vnode.h>
e5b9b344 29#include <sys/kmem_cache.h>
bbdc6ae4 30#include <linux/falloc.h>
52479ecf 31#include <linux/file_compat.h>
937879f1 32
/* Solaris-style root directory vnode.  A non-NULL sentinel value is
 * sufficient because callers only ever compare against it (see the
 * ASSERT in vn_openat()); it is never dereferenced. */
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

/* Slab caches for vnode_t and file_t objects, created in spl_vn_init(). */
static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

/* vn_file_lock protects vn_file_list, the list of every file_t handed
 * out by vn_getf() and not yet released via vn_areleasef(). */
static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);
af828292 41
4295b530
BB
42vtype_t
43vn_mode_to_vtype(mode_t mode)
4b171585 44{
45 if (S_ISREG(mode))
46 return VREG;
47
48 if (S_ISDIR(mode))
49 return VDIR;
50
51 if (S_ISCHR(mode))
52 return VCHR;
53
54 if (S_ISBLK(mode))
55 return VBLK;
56
57 if (S_ISFIFO(mode))
58 return VFIFO;
59
60 if (S_ISLNK(mode))
61 return VLNK;
62
63 if (S_ISSOCK(mode))
64 return VSOCK;
65
4b171585 66 return VNON;
4295b530
BB
67} /* vn_mode_to_vtype() */
68EXPORT_SYMBOL(vn_mode_to_vtype);
69
70mode_t
71vn_vtype_to_mode(vtype_t vtype)
72{
73 if (vtype == VREG)
74 return S_IFREG;
75
76 if (vtype == VDIR)
77 return S_IFDIR;
78
79 if (vtype == VCHR)
80 return S_IFCHR;
81
82 if (vtype == VBLK)
83 return S_IFBLK;
84
85 if (vtype == VFIFO)
86 return S_IFIFO;
87
88 if (vtype == VLNK)
89 return S_IFLNK;
90
91 if (vtype == VSOCK)
92 return S_IFSOCK;
93
94 return VNON;
95} /* vn_vtype_to_mode() */
96EXPORT_SYMBOL(vn_vtype_to_mode);
4b171585 97
af828292 98vnode_t *
99vn_alloc(int flag)
100{
101 vnode_t *vp;
102
103 vp = kmem_cache_alloc(vn_cache, flag);
af828292 104 if (vp != NULL) {
e4f1d29f 105 vp->v_file = NULL;
af828292 106 vp->v_type = 0;
107 }
108
8d9a23e8 109 return (vp);
af828292 110} /* vn_alloc() */
111EXPORT_SYMBOL(vn_alloc);
112
/* Return a vnode previously obtained from vn_alloc() to the cache. */
void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
119
/*
 * Open the file at 'path' and wrap the resulting struct file in a new
 * vnode returned through *vpp.  'path' must be a kernel-space pathname
 * (seg == UIO_SYSSPACE) and 'flags' must include FREAD and/or FWRITE;
 * x1 and x2 are unused Solaris-compatibility arguments.  Returns 0 on
 * success or a positive errno on failure.
 */
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/* Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	/* Temporarily zero the umask so file creation is not influenced
	 * by the current task's umask; restored immediately after. */
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	/* Clear __GFP_IO|__GFP_FS on the mapping so page-cache reclaim
	 * cannot recurse into the filesystem through this file; the
	 * original mask is saved in the vnode and restored by vn_close(). */
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
0b3cf046 185
0b3cf046 186int
af828292 187vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
4b171585 188 vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
0b3cf046 189{
4b171585 190 char *realpath;
12018327 191 int len, rc;
0b3cf046 192
937879f1 193 ASSERT(vp == rootdir);
0b3cf046 194
12018327 195 len = strlen(path) + 2;
54cccfc2 196 realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
4b171585 197 if (!realpath)
8d9a23e8 198 return (ENOMEM);
0b3cf046 199
12018327 200 (void)snprintf(realpath, len, "/%s", path);
4b171585 201 rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
4b171585 202 kfree(realpath);
203
8d9a23e8 204 return (rc);
4b171585 205} /* vn_openat() */
206EXPORT_SYMBOL(vn_openat);
0b3cf046 207
0b3cf046 208int
4b171585 209vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
663e02a1 210 uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
0b3cf046 211{
4b171585 212 loff_t offset;
213 mm_segment_t saved_fs;
214 struct file *fp;
215 int rc;
216
937879f1 217 ASSERT(uio == UIO_WRITE || uio == UIO_READ);
218 ASSERT(vp);
219 ASSERT(vp->v_file);
220 ASSERT(seg == UIO_SYSSPACE);
663e02a1 221 ASSERT((ioflag & ~FAPPEND) == 0);
4b171585 222
e4f1d29f 223 fp = vp->v_file;
4b171585 224
663e02a1
RC
225 offset = off;
226 if (ioflag & FAPPEND)
227 offset = fp->f_pos;
228
4b171585 229 /* Writable user data segment must be briefly increased for this
230 * process so we can use the user space read call paths to write
231 * in to memory allocated by the kernel. */
232 saved_fs = get_fs();
233 set_fs(get_ds());
234
235 if (uio & UIO_WRITE)
236 rc = vfs_write(fp, addr, len, &offset);
237 else
238 rc = vfs_read(fp, addr, len, &offset);
239
240 set_fs(saved_fs);
f3989ed3 241 fp->f_pos = offset;
4b171585 242
243 if (rc < 0)
8d9a23e8 244 return (-rc);
0b3cf046 245
4b171585 246 if (residp) {
247 *residp = len - rc;
0b3cf046 248 } else {
4b171585 249 if (rc != len)
8d9a23e8 250 return (EIO);
0b3cf046 251 }
252
8d9a23e8 253 return (0);
4b171585 254} /* vn_rdwr() */
255EXPORT_SYMBOL(vn_rdwr);
256
/*
 * Close a vnode opened by vn_open()/vn_openat(): restore the mapping's
 * original gfp mask (saved at open time), close the underlying file,
 * and free the vnode.  Returns 0 on success or a positive errno.
 */
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
272
97735c39
BB
273/* vn_seek() does not actually seek it only performs bounds checking on the
274 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
275 * anything more serious. */
276int
47995fa6 277vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
97735c39
BB
278{
279 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
280}
281EXPORT_SYMBOL(vn_seek);
282
ec18fe3c
RY
283/*
284 * spl_basename() takes a NULL-terminated string s as input containing a path.
285 * It returns a char pointer to a string and a length that describe the
286 * basename of the path. If the basename is not "." or "/", it will be an index
287 * into the string. While the string should be NULL terminated, the section
288 * referring to the basename is not. spl_basename is dual-licensed GPLv2+ and
289 * CC0. Anyone wishing to reuse it in another codebase may pick either license.
290 */
291static void
292spl_basename(const char *s, const char **str, int *len)
293{
294 size_t i, end;
295
296 ASSERT(str);
297 ASSERT(len);
298
299 if (!s || !*s) {
300 *str = ".";
301 *len = 1;
302 return;
303 }
304
305 i = strlen(s) - 1;
306
307 while (i && s[i--] == '/');
308
309 if (i == 0) {
310 *str = "/";
311 *len = 1;
312 return;
313 }
314
315 end = i;
316
317 for (end = i; i; i--) {
318 if (s[i] == '/') {
319 *str = &s[i+1];
320 *len = end - i + 1;
321 return;
322 }
323 }
324
325 *str = s;
326 *len = end + 1;
327}
328
/*
 * Resolve 'name' to a dentry while holding the parent directory's inode
 * lock, mirroring the kernel's kern_path_locked().  On success the
 * parent path is returned through *path (with the parent inode locked
 * and a reference held) and the child's dentry is returned.  On failure
 * an ERR_PTR is returned and no lock or reference is held.  "." and
 * ".." basenames are rejected with -EACCES.
 */
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	/* use I_MUTEX_PARENT because vfs_unlink needs it */
	spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		/* Lookup failed: drop the lock and parent reference so the
		 * caller only needs to inspect the returned ERR_PTR. */
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		*path = parent;
	}

	return (dentry);
}
365
bcb15891
YS
/* Based on do_unlinkat() from linux/fs/namei.c */
/*
 * Remove (unlink) the file at 'path'.  Only UIO_SYSSPACE paths and the
 * RMFILE flag are supported.  Returns 0 on success or a positive errno.
 */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	/* On success the parent inode is locked and referenced. */
	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* A trailing name character means the path had trailing
		 * slashes; report the appropriate errno via 'slashes'. */
		if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
			rc = 0;
			goto slashes;
		}

		inode = dentry->d_inode;
		if (inode) {
			/* Hold the inode so it outlives the unlink and is
			 * truncated by the final iput() below. */
			atomic_inc(&inode->i_count);
		} else {
			rc = 0;
			goto slashes;
		}

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
		dput(dentry);
	} else {
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode);    /* truncate the inode here */

	path_put(&parent);
	return (-rc);

slashes:
	/* Distinguish missing file, directory, and trailing-slash cases. */
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
418
/* Based on do_rename() from linux/fs/namei.c */
/*
 * Rename 'oldname' to 'newname'.  Both paths must live on the same
 * mount (otherwise EXDEV).  x1 is an unused compatibility argument.
 * Returns 0 on success or a positive errno.
 */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;

	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry)) {
		rc = PTR_ERR(old_dentry);
		goto exit;
	}

	/* Drop the per-parent lock; lock_rename() below takes both
	 * parents in the proper order. */
	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry)) {
		rc = PTR_ERR(new_dentry);
		goto exit2;
	}

	spl_inode_unlock(new_parent.dentry->d_inode);

	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		goto exit3;

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	/* lock_rename() returns the ancestor dentry ("trap") that neither
	 * endpoint may equal, preventing a rename into a descendant. */
	trap = lock_rename(new_dir, old_dir);

	/* source should not be ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		goto exit4;

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit4;

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;

	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			goto exit4;
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			goto exit4;
	}

#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	return (-rc);
}
EXPORT_SYMBOL(vn_rename);
499
4b171585 500int
36e6f861 501vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
0b3cf046 502{
4b171585 503 struct file *fp;
dcd9cb5a 504 struct kstat stat;
4b171585 505 int rc;
506
937879f1 507 ASSERT(vp);
508 ASSERT(vp->v_file);
509 ASSERT(vap);
4b171585 510
e4f1d29f 511 fp = vp->v_file;
4b171585 512
2a305c34
RY
513#ifdef HAVE_2ARGS_VFS_GETATTR
514 rc = vfs_getattr(&fp->f_path, &stat);
515#else
516 rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
517#endif
4b171585 518 if (rc)
8d9a23e8 519 return (-rc);
4b171585 520
4295b530 521 vap->va_type = vn_mode_to_vtype(stat.mode);
4b171585 522 vap->va_mode = stat.mode;
f7fd6ddd
RY
523 vap->va_uid = KUID_TO_SUID(stat.uid);
524 vap->va_gid = KGID_TO_SGID(stat.gid);
4b171585 525 vap->va_fsid = 0;
526 vap->va_nodeid = stat.ino;
527 vap->va_nlink = stat.nlink;
528 vap->va_size = stat.size;
47995fa6 529 vap->va_blksize = stat.blksize;
dcd9cb5a
BB
530 vap->va_atime = stat.atime;
531 vap->va_mtime = stat.mtime;
532 vap->va_ctime = stat.ctime;
4b171585 533 vap->va_rdev = stat.rdev;
dcd9cb5a 534 vap->va_nblocks = stat.blocks;
4b171585 535
8d9a23e8 536 return (0);
0b3cf046 537}
4b171585 538EXPORT_SYMBOL(vn_getattr);
539
2f5d55aa 540int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
4b171585 541{
36e6f861 542 int datasync = 0;
2a5d574e
BB
543 int error;
544 int fstrans;
36e6f861 545
937879f1 546 ASSERT(vp);
547 ASSERT(vp->v_file);
4b171585 548
36e6f861 549 if (flags & FDSYNC)
550 datasync = 1;
551
2a5d574e
BB
552 /*
553 * May enter XFS which generates a warning when PF_FSTRANS is set.
554 * To avoid this the flag is cleared over vfs_sync() and then reset.
555 */
556 fstrans = spl_fstrans_check();
557 if (fstrans)
558 current->flags &= ~(PF_FSTRANS);
559
560 error = -spl_filp_fsync(vp->v_file, datasync);
561 if (fstrans)
562 current->flags |= PF_FSTRANS;
563
564 return (error);
4b171585 565} /* vn_fsync() */
566EXPORT_SYMBOL(vn_fsync);
af828292 567
bbdc6ae4
ED
/*
 * Free (punch a hole in) a byte range of a vnode, emulating Solaris
 * VOP_SPACE with cmd F_FREESP.  Preferentially uses fallocate() hole
 * punching; falls back to the inode's truncate_range op when present.
 * Returns 0 on success, EOPNOTSUPP when unsupported, or another
 * positive errno.
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
	offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE
	int fstrans;
#endif

	/* Only F_FREESP with l_whence == SEEK_SET (0) is supported. */
	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fallocate call and
	 * then reset.
	 */
	fstrans = spl_fstrans_check();
	if (fstrans)
		current->flags &= ~(PF_FSTRANS);

	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to preallocate the space.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);

	if (fstrans)
		current->flags |= PF_FSTRANS;

	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode,
		    bfp->l_start, end
		);
		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);
635
e4f1d29f 636/* Function must be called while holding the vn_file_lock */
637static file_t *
d3c677bc 638file_find(int fd, struct task_struct *task)
e4f1d29f 639{
640 file_t *fp;
641
937879f1 642 ASSERT(spin_is_locked(&vn_file_lock));
e4f1d29f 643
644 list_for_each_entry(fp, &vn_file_list, f_list) {
d3c677bc 645 if (fd == fp->f_fd && fp->f_task == task) {
937879f1 646 ASSERT(atomic_read(&fp->f_ref) != 0);
e4f1d29f 647 return fp;
648 }
649 }
650
651 return NULL;
652} /* file_find() */
653
/*
 * Solaris getf() emulation: take a reference on file descriptor 'fd'
 * for the current task, creating a file_t/vnode wrapper around the
 * underlying struct file on first use and tracking it on vn_file_list.
 * Returns the file_t, or NULL on bad fd or allocation/lookup failure.
 * Callers release the reference with vn_releasef()/vn_areleasef().
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		lfp = fget(fd);
		fput(fp->f_file);
		/*
		 * areleasef() can cause us to see a stale reference when
		 * userspace has reused a file descriptor before areleasef()
		 * has run. fput() the stale reference and replace it. We
		 * retain the original reference count such that the concurrent
		 * areleasef() will decrement its reference and terminate.
		 */
		if (lfp != fp->f_file) {
			fp->f_file = lfp;
			fp->f_vnode->v_file = lfp;
		}
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

	/* Unwind in reverse order of acquisition on any failure. */
out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
746
/* Drop a file_t's underlying resources.  Caller must already have
 * unlinked it from vn_file_list while holding vn_file_lock. */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
758
/* Solaris releasef() emulation: release the current task's reference
 * on descriptor 'fd' taken by vn_getf(). */
void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
}
EXPORT_SYMBOL(releasef);
765
/*
 * Solaris areleasef() emulation: drop one reference on descriptor 'fd'
 * for the task identified by 'fip' (here simply a task_struct pointer,
 * see P_FINFO).  When the last reference is dropped the file_t is
 * unlinked from vn_file_list and freed.  Unknown fds are ignored.
 */
void
vn_areleasef(int fd, uf_info_t *fip)
{
	file_t *fp;
	struct task_struct *task = (struct task_struct *)fip;

	if (fd < 0)
		return;

	spin_lock(&vn_file_lock);
	fp = file_find(fd, task);
	if (fp) {
		/* Both the decrement and the test happen under
		 * vn_file_lock, so no other thread can race them. */
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			spin_unlock(&vn_file_lock);
			return;
		}

		list_del(&fp->f_list);
		releasef_locked(fp);
	}
	spin_unlock(&vn_file_lock);

	return;
} /* releasef() */
EXPORT_SYMBOL(areleasef);
792
e4f1d29f 793
137af025
BB
/* Replace the working directory in 'fs' with 'path', taking a reference
 * on the new path and dropping the reference on the old one.  The
 * fs_struct lock type (spinlock vs rwlock) varies by kernel version. */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Release the old pwd outside the lock. */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
51a727e9
BB
820
/*
 * Change the current task's working directory to 'filename' (a
 * kernel-space pathname).  Returns 0 on success or a positive errno.
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	/* The target must be a directory we may search. */
	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
854
af828292 855static int
856vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
857{
858 struct vnode *vp = buf;
859
860 mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
861
862 return (0);
863} /* vn_cache_constructor() */
864
/* Slab destructor for vn_cache: tear down the mutex set up by
 * vn_cache_constructor(). */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
872
e4f1d29f 873static int
874vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
875{
876 file_t *fp = buf;
877
878 atomic_set(&fp->f_ref, 0);
879 mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
4e62fd41 880 INIT_LIST_HEAD(&fp->f_list);
e4f1d29f 881
882 return (0);
883} /* file_cache_constructor() */
884
/* Slab destructor for vn_file_cache: tear down the mutex set up by
 * vn_file_cache_constructor(). */
static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
892
af828292 893int
12ff95ff 894spl_vn_init(void)
af828292 895{
57d86234 896 vn_cache = kmem_cache_create("spl_vn_cache",
897 sizeof(struct vnode), 64,
5d86345d 898 vn_cache_constructor,
899 vn_cache_destructor,
97048200 900 NULL, NULL, NULL, 0);
e4f1d29f 901
902 vn_file_cache = kmem_cache_create("spl_vn_file_cache",
903 sizeof(file_t), 64,
904 vn_file_cache_constructor,
905 vn_file_cache_destructor,
97048200 906 NULL, NULL, NULL, 0);
8d9a23e8 907 return (0);
af828292 908} /* vn_init() */
909
/* Module teardown: force-release any file_t handles still outstanding
 * (warning about the leak count) and destroy both slab caches. */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);

	return;
} /* vn_fini() */