]> git.proxmox.com Git - mirror_spl.git/blame - module/spl/spl-vnode.c
Limit number of tasks shown in taskq proc
[mirror_spl.git] / module / spl / spl-vnode.c
CommitLineData
716154c5
BB
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
716154c5
BB
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Vnode Implementation.
25\*****************************************************************************/
715f6251 26
f7fd6ddd 27#include <sys/cred.h>
4b171585 28#include <sys/vnode.h>
e5b9b344 29#include <sys/kmem_cache.h>
bbdc6ae4 30#include <linux/falloc.h>
52479ecf 31#include <linux/file_compat.h>
937879f1 32
51a727e9 33vnode_t *rootdir = (vnode_t *)0xabcd1234;
4b171585 34EXPORT_SYMBOL(rootdir);
35
7afde631 36static spl_kmem_cache_t *vn_cache;
37static spl_kmem_cache_t *vn_file_cache;
e4f1d29f 38
83c623aa 39static DEFINE_SPINLOCK(vn_file_lock);
e4f1d29f 40static LIST_HEAD(vn_file_list);
af828292 41
4295b530
BB
42vtype_t
43vn_mode_to_vtype(mode_t mode)
4b171585 44{
45 if (S_ISREG(mode))
46 return VREG;
47
48 if (S_ISDIR(mode))
49 return VDIR;
50
51 if (S_ISCHR(mode))
52 return VCHR;
53
54 if (S_ISBLK(mode))
55 return VBLK;
56
57 if (S_ISFIFO(mode))
58 return VFIFO;
59
60 if (S_ISLNK(mode))
61 return VLNK;
62
63 if (S_ISSOCK(mode))
64 return VSOCK;
65
66 if (S_ISCHR(mode))
67 return VCHR;
68
69 return VNON;
4295b530
BB
70} /* vn_mode_to_vtype() */
71EXPORT_SYMBOL(vn_mode_to_vtype);
72
73mode_t
74vn_vtype_to_mode(vtype_t vtype)
75{
76 if (vtype == VREG)
77 return S_IFREG;
78
79 if (vtype == VDIR)
80 return S_IFDIR;
81
82 if (vtype == VCHR)
83 return S_IFCHR;
84
85 if (vtype == VBLK)
86 return S_IFBLK;
87
88 if (vtype == VFIFO)
89 return S_IFIFO;
90
91 if (vtype == VLNK)
92 return S_IFLNK;
93
94 if (vtype == VSOCK)
95 return S_IFSOCK;
96
97 return VNON;
98} /* vn_vtype_to_mode() */
99EXPORT_SYMBOL(vn_vtype_to_mode);
4b171585 100
af828292 101vnode_t *
102vn_alloc(int flag)
103{
104 vnode_t *vp;
105
106 vp = kmem_cache_alloc(vn_cache, flag);
af828292 107 if (vp != NULL) {
e4f1d29f 108 vp->v_file = NULL;
af828292 109 vp->v_type = 0;
110 }
111
8d9a23e8 112 return (vp);
af828292 113} /* vn_alloc() */
114EXPORT_SYMBOL(vn_alloc);
115
/*
 * Return a vnode previously obtained from vn_alloc() to the cache.
 */
void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
122
/*
 * Open 'path' and return a new vnode in '*vpp'.  Solaris-style FREAD/
 * FWRITE/FCREAT flags are translated for filp_open().  Returns 0 on
 * success or a positive errno on failure.  x1/x2 are unused
 * compatibility parameters.
 */
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	/* Writes to an existing file must not silently create it */
	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/* Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	/* Temporarily zero the umask so 'mode' is applied exactly */
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	/* Disallow GFP_IO/GFP_FS page cache allocations on this mapping
	 * to avoid re-entering the filesystem from reclaim. */
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;	/* restored by vn_close() */
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
0b3cf046 188
/*
 * Open 'path' relative to vnode 'vp', which must be rootdir.  Since
 * only the root directory is supported the lookup reduces to opening
 * the absolute path "/<path>" via vn_open().  Returns 0 on success or
 * a positive errno on failure.
 */
int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
	  vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
	char *realpath;
	int len, rc;

	ASSERT(vp == rootdir);

	/* +2 covers the leading '/' and the trailing NUL */
	len = strlen(path) + 2;
	realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
	if (!realpath)
		return (ENOMEM);

	(void)snprintf(realpath, len, "/%s", path);
	rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
	kfree(realpath);

	return (rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
0b3cf046 210
/*
 * Read from or write to a vnode at offset 'off' (or the current file
 * position when FAPPEND is set).  On success the file position is
 * advanced and 0 is returned; '*residp', if provided, receives the
 * number of bytes NOT transferred.  Without 'residp' a short transfer
 * is reported as EIO.  Returns a positive errno on failure.
 */
int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
	uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	loff_t offset;
	mm_segment_t saved_fs;
	struct file *fp;
	int rc;

	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);

	fp = vp->v_file;

	offset = off;
	if (ioflag & FAPPEND)
		offset = fp->f_pos;

	/* Writable user data segment must be briefly increased for this
	 * process so we can use the user space read call paths to write
	 * in to memory allocated by the kernel. */
	saved_fs = get_fs();
	set_fs(get_ds());

	/* NOTE(review): write is selected with a bitwise test
	 * (uio & UIO_WRITE) rather than equality; this relies on
	 * UIO_WRITE being non-zero and distinct from UIO_READ —
	 * confirm against the uio_rw_t definition. */
	if (uio & UIO_WRITE)
		rc = vfs_write(fp, addr, len, &offset);
	else
		rc = vfs_read(fp, addr, len, &offset);

	set_fs(saved_fs);
	fp->f_pos = offset;

	if (rc < 0)
		return (-rc);

	if (residp) {
		*residp = len - rc;
	} else {
		if (rc != len)
			return (EIO);
	}

	return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
259
/*
 * Close a vnode opened with vn_open()/vn_openat().  The page cache gfp
 * mask saved at open time is restored before the file is closed and the
 * vnode freed.  Returns a positive errno on failure.
 */
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
275
97735c39
BB
276/* vn_seek() does not actually seek it only performs bounds checking on the
277 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
278 * anything more serious. */
279int
47995fa6 280vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
97735c39
BB
281{
282 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
283}
284EXPORT_SYMBOL(vn_seek);
285
ec18fe3c
RY
/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path.  If the basename is not "." or "/", it will be an
 * index into the string.  While the string should be NULL terminated, the
 * section referring to the basename is not.  spl_basename is dual-licensed
 * GPLv2+ and CC0.  Anyone wishing to reuse it in another codebase may pick
 * either license.
 *
 * Fixes over the previous version: the scan loop post-decremented the
 * index even on a non-'/' character, which truncated slash-less names
 * ("foo" -> len 2), mis-measured names with trailing slashes, and
 * returned the wrong pointer for single-component absolute paths
 * ("/foo" -> "/fo").
 */
static void
spl_basename(const char *s, const char **str, int *len)
{
	size_t i, end;

	ASSERT(str);
	ASSERT(len);

	/* Empty or NULL path: basename is "." by convention */
	if (!s || !*s) {
		*str = ".";
		*len = 1;
		return;
	}

	i = strlen(s) - 1;

	/* Strip trailing slashes */
	while (i > 0 && s[i] == '/')
		i--;

	/* Path consisted entirely of slashes */
	if (i == 0 && s[0] == '/') {
		*str = "/";
		*len = 1;
		return;
	}

	end = i;	/* index of the last basename character */

	/* Walk back to the slash preceding the basename (if any) */
	while (i > 0 && s[i - 1] != '/')
		i--;

	*str = &s[i];
	*len = end - i + 1;
}
331
/*
 * Resolve 'name' to the dentry of its final path component.  On success
 * the returned dentry is valid, '*path' holds a reference on the parent
 * directory, and the parent inode is left locked with I_MUTEX_PARENT
 * (required by vfs_unlink()); the caller must unlock it and path_put()
 * the parent when done.  On failure an ERR_PTR() is returned and no
 * lock or reference is held.  "." and ".." basenames are rejected with
 * -EACCES.
 */
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	/* use I_MUTEX_PARENT because vfs_unlink needs it */
	spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		/* Lookup failed: drop the lock and parent reference here */
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		/* Success: hand the locked parent back to the caller */
		*path = parent;
	}

	return (dentry);
}
368
bcb15891
YS
/* Based on do_unlinkat() from linux/fs/namei.c */
/*
 * Unlink the file at 'path'.  Only UIO_SYSSPACE paths and the RMFILE
 * flag are supported.  Paths whose final component has trailing slashes
 * or does not exist are reported via the 'slashes' error path.  Returns
 * 0 on success or a positive errno on failure.
 */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	/* Returns with the parent inode locked on success */
	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Trailing characters past the parent name => slashes */
		if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
			rc = 0;
			goto slashes;
		}

		inode = dentry->d_inode;
		if (inode) {
			/* Pin the inode so it outlives the unlink */
			atomic_inc(&inode->i_count);
		} else {
			rc = 0;
			goto slashes;
		}

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
		dput(dentry);
	} else {
		/* Lookup failed: nothing is locked or referenced */
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode); /* truncate the inode here */

	path_put(&parent);
	return (-rc);

slashes:
	/* Distinguish missing file / directory / trailing-slash errors */
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
421
/* Based on do_rename() from linux/fs/namei.c */
/*
 * Rename 'oldname' to 'newname'.  Both paths must reside on the same
 * mount.  The parent directories are locked via lock_rename() and the
 * usual ancestor/existence/trailing-slash checks from do_rename() are
 * applied before calling vfs_rename().  Returns 0 on success or a
 * positive errno on failure.  'x1' is unused.
 */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;

	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry)) {
		rc = PTR_ERR(old_dentry);
		goto exit;
	}

	/* Drop the per-parent lock; lock_rename() takes both below */
	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry)) {
		rc = PTR_ERR(new_dentry);
		goto exit2;
	}

	spl_inode_unlock(new_parent.dentry->d_inode);

	/* Renames may not cross mount points */
	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		goto exit3;

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	trap = lock_rename(new_dir, old_dir);

	/* source should not be ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		goto exit4;

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit4;

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;

	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			goto exit4;
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			goto exit4;
	}

#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	return (-rc);
}
EXPORT_SYMBOL(vn_rename);
502
/*
 * Fill in a Solaris-style vattr_t for 'vp' by querying the underlying
 * Linux file via vfs_getattr().  Returns 0 on success or a positive
 * errno on failure.  'flags', 'x3' and 'x4' are unused compatibility
 * parameters.  Note va_fsid is always reported as 0.
 */
int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(vap);

	fp = vp->v_file;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc)
		return (-rc);

	/* Translate struct kstat into the Solaris vattr_t layout */
	vap->va_type = vn_mode_to_vtype(stat.mode);
	vap->va_mode = stat.mode;
	vap->va_uid = KUID_TO_SUID(stat.uid);
	vap->va_gid = KGID_TO_SGID(stat.gid);
	vap->va_fsid = 0;
	vap->va_nodeid = stat.ino;
	vap->va_nlink = stat.nlink;
	vap->va_size = stat.size;
	vap->va_blksize = stat.blksize;
	vap->va_atime = stat.atime;
	vap->va_mtime = stat.mtime;
	vap->va_ctime = stat.ctime;
	vap->va_rdev = stat.rdev;
	vap->va_nblocks = stat.blocks;

	return (0);
}
EXPORT_SYMBOL(vn_getattr);
542
/*
 * Flush a vnode's dirty data to disk.  FDSYNC in 'flags' requests a
 * data-only sync (fdatasync semantics).  Returns 0 on success or a
 * positive errno on failure.  'x3' and 'x4' are unused.
 */
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;
	int error;
	int fstrans;

	ASSERT(vp);
	ASSERT(vp->v_file);

	if (flags & FDSYNC)
		datasync = 1;

	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over vfs_sync() and then reset.
	 */
	fstrans = spl_fstrans_check();
	if (fstrans)
		current->flags &= ~(PF_FSTRANS);

	error = -spl_filp_fsync(vp->v_file, datasync);
	if (fstrans)
		current->flags |= PF_FSTRANS;

	return (error);
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
af828292 570
bbdc6ae4
ED
/*
 * Free (punch a hole in) a range of a vnode's backing file.  Only the
 * F_FREESP command with l_whence == 0 is supported.  Preferentially
 * uses fallocate(FALLOC_FL_PUNCH_HOLE); falls back to the inode's
 * truncate_range() operation on kernels that provide it.  Returns 0 on
 * success or a positive errno (EOPNOTSUPP when no mechanism exists).
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
	     offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE
	int fstrans;
#endif

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over vfs_sync() and then reset.
	 */
	fstrans = spl_fstrans_check();
	if (fstrans)
		current->flags &= ~(PF_FSTRANS);

	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to preallocate the space.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);

	if (fstrans)
		current->flags |= PF_FSTRANS;

	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	/* Fallback: use the inode's truncate_range() op when available */
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
			vp->v_file->f_dentry->d_inode,
			bfp->l_start, end
		);
		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);
638
/* Function must be called while holding the vn_file_lock */
/*
 * Look up the tracked file_t for descriptor 'fd' owned by 'task' on the
 * global vn_file_list.  Returns the entry (reference count unchanged)
 * or NULL if no match exists.
 */
static file_t *
file_find(int fd, struct task_struct *task)
{
	file_t *fp;

	ASSERT(spin_is_locked(&vn_file_lock));

	list_for_each_entry(fp, &vn_file_list, f_list) {
		if (fd == fp->f_fd && fp->f_task == task) {
			/* A listed entry must always hold a reference */
			ASSERT(atomic_read(&fp->f_ref) != 0);
			return fp;
		}
	}

	return NULL;
} /* file_find() */
656
/*
 * Solaris getf(): return a referenced file_t for an already-open file
 * descriptor belonging to the current task.  A repeat call for the same
 * fd takes an extra reference on the existing entry; otherwise a new
 * file_t/vnode pair is created and added to the global tracking list.
 * Returns NULL on error (bad fd, allocation failure, or getattr
 * failure).  Release with vn_releasef()/vn_areleasef().
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		lfp = fget(fd);
		fput(fp->f_file);
		/*
		 * areleasef() can cause us to see a stale reference when
		 * userspace has reused a file descriptor before areleasef()
		 * has run. fput() the stale reference and replace it. We
		 * retain the original reference count such that the concurrent
		 * areleasef() will decrement its reference and terminate.
		 */
		if (lfp != fp->f_file) {
			fp->f_file = lfp;
			fp->f_vnode->v_file = lfp;
		}
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

/* Unwind in reverse order of acquisition */
out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
749
/*
 * Drop the final references held by a file_t: the struct file and its
 * vnode.  The entry must already be unlinked from vn_file_list with a
 * zero reference count.
 */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
761
/*
 * Solaris releasef(): release a reference on 'fd' for the current
 * task.  Thin wrapper over vn_areleasef().
 */
void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
}
EXPORT_SYMBOL(releasef);
768
/*
 * Solaris areleasef(): drop one reference on the file_t tracked for
 * (fd, task).  When the count reaches zero the entry is unlinked from
 * vn_file_list and freed.  Unknown or negative fds are ignored.
 * NOTE(review): 'fip' is cast directly to a task_struct pointer —
 * P_FINFO() is presumably an identity mapping here; verify.
 */
void
vn_areleasef(int fd, uf_info_t *fip)
{
	file_t *fp;
	struct task_struct *task = (struct task_struct *)fip;

	if (fd < 0)
		return;

	spin_lock(&vn_file_lock);
	fp = file_find(fd, task);
	if (fp) {
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			/* Other holders remain; nothing more to do */
			spin_unlock(&vn_file_lock);
			return;
		}

		list_del(&fp->f_list);
		releasef_locked(fp);
	}
	spin_unlock(&vn_file_lock);

	return;
} /* releasef() */
EXPORT_SYMBOL(areleasef);
795
e4f1d29f 796
137af025
BB
/*
 * Replace the working directory in 'fs' with 'path', taking a reference
 * on the new path and dropping the old one.  Locking uses whichever
 * primitive (spinlock vs rwlock) this kernel's fs_struct provides.
 */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Drop the previous pwd reference outside the lock */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
51a727e9
BB
823
/*
 * Set the current task's working directory to 'filename' (a kernel
 * space path).  The data segment limit is temporarily raised so the
 * user-path lookup helpers accept the kernel address.  Returns 0 on
 * success or a positive errno on failure.
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	/* Must be able to search/traverse the target directory */
	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
857
/*
 * kmem cache constructor: initialize the per-vnode lock.  Runs once
 * per slab object; per-allocation fields are reset in vn_alloc().
 */
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */
867
/*
 * kmem cache destructor: tear down the per-vnode lock.
 */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
875
/*
 * kmem cache constructor for file_t objects: zero the reference count
 * and initialize the lock and list linkage.
 */
static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* file_cache_constructor() */
887
/*
 * kmem cache destructor for file_t objects: tear down the lock.
 */
static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
895
/*
 * Module init: create the vnode and file_t kmem caches (64-byte
 * aligned, kernel-memory backed).  Always returns 0.
 * NOTE(review): kmem_cache_create() results are not checked here —
 * a failure would surface later as a NULL cache; confirm acceptable.
 */
int
spl_vn_init(void)
{
	vn_cache = kmem_cache_create("spl_vn_cache",
				     sizeof(struct vnode), 64,
				     vn_cache_constructor,
				     vn_cache_destructor,
				     NULL, NULL, NULL, KMC_KMEM);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
					  sizeof(file_t), 64,
					  vn_file_cache_constructor,
					  vn_file_cache_destructor,
					  NULL, NULL, NULL, KMC_KMEM);
	return (0);
} /* vn_init() */
912
/*
 * Module teardown: force-release any file_t entries still on the
 * tracking list (these are leaks — a warning is printed), then destroy
 * both kmem caches.
 */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);

	return;
} /* vn_fini() */