]> git.proxmox.com Git - mirror_spl-debian.git/blame - module/spl/spl-vnode.c
New upstream version 0.7.4
[mirror_spl-debian.git] / module / spl / spl-vnode.c
CommitLineData
716154c5
BB
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
716154c5
BB
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Vnode Implementation.
25\*****************************************************************************/
715f6251 26
80093b6f 27#include <sys/cred.h>
4b171585 28#include <sys/vnode.h>
10946b02 29#include <sys/kmem_cache.h>
bbdc6ae4 30#include <linux/falloc.h>
10946b02 31#include <linux/file_compat.h>
937879f1 32
51a727e9 33vnode_t *rootdir = (vnode_t *)0xabcd1234;
4b171585 34EXPORT_SYMBOL(rootdir);
35
7afde631 36static spl_kmem_cache_t *vn_cache;
37static spl_kmem_cache_t *vn_file_cache;
e4f1d29f 38
83c623aa 39static DEFINE_SPINLOCK(vn_file_lock);
e4f1d29f 40static LIST_HEAD(vn_file_list);
af828292 41
4295b530
BB
42vtype_t
43vn_mode_to_vtype(mode_t mode)
4b171585 44{
45 if (S_ISREG(mode))
46 return VREG;
47
48 if (S_ISDIR(mode))
49 return VDIR;
50
51 if (S_ISCHR(mode))
52 return VCHR;
53
54 if (S_ISBLK(mode))
55 return VBLK;
56
57 if (S_ISFIFO(mode))
58 return VFIFO;
59
60 if (S_ISLNK(mode))
61 return VLNK;
62
63 if (S_ISSOCK(mode))
64 return VSOCK;
65
4b171585 66 return VNON;
4295b530
BB
67} /* vn_mode_to_vtype() */
68EXPORT_SYMBOL(vn_mode_to_vtype);
69
70mode_t
71vn_vtype_to_mode(vtype_t vtype)
72{
73 if (vtype == VREG)
74 return S_IFREG;
75
76 if (vtype == VDIR)
77 return S_IFDIR;
78
79 if (vtype == VCHR)
80 return S_IFCHR;
81
82 if (vtype == VBLK)
83 return S_IFBLK;
84
85 if (vtype == VFIFO)
86 return S_IFIFO;
87
88 if (vtype == VLNK)
89 return S_IFLNK;
90
91 if (vtype == VSOCK)
92 return S_IFSOCK;
93
94 return VNON;
95} /* vn_vtype_to_mode() */
96EXPORT_SYMBOL(vn_vtype_to_mode);
4b171585 97
af828292 98vnode_t *
99vn_alloc(int flag)
100{
101 vnode_t *vp;
102
103 vp = kmem_cache_alloc(vn_cache, flag);
af828292 104 if (vp != NULL) {
e4f1d29f 105 vp->v_file = NULL;
af828292 106 vp->v_type = 0;
107 }
108
10946b02 109 return (vp);
af828292 110} /* vn_alloc() */
111EXPORT_SYMBOL(vn_alloc);
112
113void
114vn_free(vnode_t *vp)
115{
116 kmem_cache_free(vn_cache, vp);
117} /* vn_free() */
118EXPORT_SYMBOL(vn_free);
119
0b3cf046 120int
af828292 121vn_open(const char *path, uio_seg_t seg, int flags, int mode,
4b171585 122 vnode_t **vpp, int x1, void *x2)
0b3cf046 123{
f7e8739c
RC
124 struct file *fp;
125 struct kstat stat;
126 int rc, saved_umask = 0;
4be55565 127 gfp_t saved_gfp;
0b3cf046 128 vnode_t *vp;
0b3cf046 129
937879f1 130 ASSERT(flags & (FWRITE | FREAD));
131 ASSERT(seg == UIO_SYSSPACE);
132 ASSERT(vpp);
4b171585 133 *vpp = NULL;
134
135 if (!(flags & FCREAT) && (flags & FWRITE))
136 flags |= FEXCL;
137
728b9dd8 138 /* Note for filp_open() the two low bits must be remapped to mean:
139 * 01 - read-only -> 00 read-only
140 * 10 - write-only -> 01 write-only
141 * 11 - read-write -> 10 read-write
142 */
143 flags--;
0b3cf046 144
145 if (flags & FCREAT)
4b171585 146 saved_umask = xchg(&current->fs->umask, 0);
0b3cf046 147
f7e8739c 148 fp = filp_open(path, flags, mode);
0b3cf046 149
150 if (flags & FCREAT)
4b171585 151 (void)xchg(&current->fs->umask, saved_umask);
0b3cf046 152
f7e8739c 153 if (IS_ERR(fp))
10946b02 154 return (-PTR_ERR(fp));
0b3cf046 155
2ea56c1d
AX
156#if defined(HAVE_4ARGS_VFS_GETATTR)
157 rc = vfs_getattr(&fp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
158#elif defined(HAVE_2ARGS_VFS_GETATTR)
2a305c34
RY
159 rc = vfs_getattr(&fp->f_path, &stat);
160#else
bc90df66 161 rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
2a305c34 162#endif
4b171585 163 if (rc) {
164 filp_close(fp, 0);
10946b02 165 return (-rc);
0b3cf046 166 }
167
af828292 168 vp = vn_alloc(KM_SLEEP);
4b171585 169 if (!vp) {
170 filp_close(fp, 0);
10946b02 171 return (ENOMEM);
4b171585 172 }
0b3cf046 173
4be55565
LW
174 saved_gfp = mapping_gfp_mask(fp->f_mapping);
175 mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));
176
e4f1d29f 177 mutex_enter(&vp->v_lock);
4295b530 178 vp->v_type = vn_mode_to_vtype(stat.mode);
e4f1d29f 179 vp->v_file = fp;
4be55565 180 vp->v_gfp_mask = saved_gfp;
4b171585 181 *vpp = vp;
e4f1d29f 182 mutex_exit(&vp->v_lock);
0b3cf046 183
10946b02 184 return (0);
4b171585 185} /* vn_open() */
186EXPORT_SYMBOL(vn_open);
0b3cf046 187
0b3cf046 188int
af828292 189vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
4b171585 190 vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
0b3cf046 191{
4b171585 192 char *realpath;
12018327 193 int len, rc;
0b3cf046 194
937879f1 195 ASSERT(vp == rootdir);
0b3cf046 196
12018327 197 len = strlen(path) + 2;
10946b02 198 realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
4b171585 199 if (!realpath)
10946b02 200 return (ENOMEM);
0b3cf046 201
12018327 202 (void)snprintf(realpath, len, "/%s", path);
4b171585 203 rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
4b171585 204 kfree(realpath);
205
10946b02 206 return (rc);
4b171585 207} /* vn_openat() */
208EXPORT_SYMBOL(vn_openat);
0b3cf046 209
0b3cf046 210int
4b171585 211vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
663e02a1 212 uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
0b3cf046 213{
99d6d8dc
AX
214 struct file *fp = vp->v_file;
215 loff_t offset = off;
4b171585 216 int rc;
217
937879f1 218 ASSERT(uio == UIO_WRITE || uio == UIO_READ);
937879f1 219 ASSERT(seg == UIO_SYSSPACE);
663e02a1 220 ASSERT((ioflag & ~FAPPEND) == 0);
4b171585 221
663e02a1
RC
222 if (ioflag & FAPPEND)
223 offset = fp->f_pos;
224
4b171585 225 if (uio & UIO_WRITE)
99d6d8dc 226 rc = spl_kernel_write(fp, addr, len, &offset);
4b171585 227 else
99d6d8dc 228 rc = spl_kernel_read(fp, addr, len, &offset);
4b171585 229
f3989ed3 230 fp->f_pos = offset;
4b171585 231
232 if (rc < 0)
10946b02 233 return (-rc);
0b3cf046 234
4b171585 235 if (residp) {
236 *residp = len - rc;
0b3cf046 237 } else {
4b171585 238 if (rc != len)
10946b02 239 return (EIO);
0b3cf046 240 }
241
10946b02 242 return (0);
4b171585 243} /* vn_rdwr() */
244EXPORT_SYMBOL(vn_rdwr);
245
246int
2f5d55aa 247vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
4b171585 248{
249 int rc;
250
937879f1 251 ASSERT(vp);
252 ASSERT(vp->v_file);
4b171585 253
4be55565 254 mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
97735c39
BB
255 rc = filp_close(vp->v_file, 0);
256 vn_free(vp);
4b171585 257
10946b02 258 return (-rc);
4b171585 259} /* vn_close() */
260EXPORT_SYMBOL(vn_close);
261
97735c39
BB
262/* vn_seek() does not actually seek it only performs bounds checking on the
263 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
264 * anything more serious. */
265int
47995fa6 266vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
97735c39
BB
267{
268 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
269}
270EXPORT_SYMBOL(vn_seek);
271
/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path.  If the basename is not "." or "/", it will be an
 * index into the string.  While the string should be NULL terminated, the
 * section referring to the basename is not.  spl_basename is dual-licensed
 * GPLv2+ and CC0.  Anyone wishing to reuse it in another codebase may pick
 * either license.
 *
 * Callers must pass valid 'str' and 'len' result pointers.
 *
 * Note: the previous implementation over-decremented its index for
 * paths containing no '/' (e.g. "abc" produced length 2 and "ab"
 * collapsed to "/", which also let ".." slip past the check in
 * spl_kern_path_locked()).  Trailing-slash stripping and the final
 * length are now computed correctly.
 */
static void
spl_basename(const char *s, const char **str, int *len)
{
	size_t i, end;

	if (!s || !*s) {
		*str = ".";
		*len = 1;
		return;
	}

	i = strlen(s) - 1;

	/* Strip any trailing slashes */
	while (i > 0 && s[i] == '/')
		i--;

	/* The path consisted solely of slashes */
	if (i == 0 && s[0] == '/') {
		*str = "/";
		*len = 1;
		return;
	}

	/* 'end' indexes the last character of the basename */
	end = i;

	/* Back up to the slash preceding the basename, if any */
	while (i > 0 && s[i - 1] != '/')
		i--;

	*str = &s[i];
	*len = (int)(end - i + 1);
}
318static struct dentry *
319spl_kern_path_locked(const char *name, struct path *path)
320{
321 struct path parent;
322 struct dentry *dentry;
323 const char *basename;
324 int len;
325 int rc;
326
327 ASSERT(name);
328 ASSERT(path);
329
330 spl_basename(name, &basename, &len);
331
332 /* We do not accept "." or ".." */
333 if (len <= 2 && basename[0] == '.')
334 if (len == 1 || basename[1] == '.')
335 return (ERR_PTR(-EACCES));
336
337 rc = kern_path(name, LOOKUP_PARENT, &parent);
338 if (rc)
339 return (ERR_PTR(rc));
340
0f836a62
AX
341 /* use I_MUTEX_PARENT because vfs_unlink needs it */
342 spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);
10946b02
AX
343
344 dentry = lookup_one_len(basename, parent.dentry, len);
345 if (IS_ERR(dentry)) {
346 spl_inode_unlock(parent.dentry->d_inode);
347 path_put(&parent);
348 } else {
349 *path = parent;
350 }
351
352 return (dentry);
353}
354
bcb15891
YS
355/* Based on do_unlinkat() from linux/fs/namei.c */
356int
357vn_remove(const char *path, uio_seg_t seg, int flags)
358{
359 struct dentry *dentry;
360 struct path parent;
361 struct inode *inode = NULL;
362 int rc = 0;
bcb15891
YS
363
364 ASSERT(seg == UIO_SYSSPACE);
365 ASSERT(flags == RMFILE);
366
367 dentry = spl_kern_path_locked(path, &parent);
368 rc = PTR_ERR(dentry);
369 if (!IS_ERR(dentry)) {
10946b02
AX
370 if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
371 rc = 0;
372 goto slashes;
373 }
bcb15891
YS
374
375 inode = dentry->d_inode;
10946b02
AX
376 if (inode) {
377 atomic_inc(&inode->i_count);
378 } else {
379 rc = 0;
380 goto slashes;
381 }
bcb15891 382
33a20369 383#ifdef HAVE_2ARGS_VFS_UNLINK
bcb15891 384 rc = vfs_unlink(parent.dentry->d_inode, dentry);
33a20369
LG
385#else
386 rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
387#endif /* HAVE_2ARGS_VFS_UNLINK */
bcb15891
YS
388exit1:
389 dput(dentry);
053678f3
BB
390 } else {
391 return (-rc);
bcb15891
YS
392 }
393
394 spl_inode_unlock(parent.dentry->d_inode);
395 if (inode)
396 iput(inode); /* truncate the inode here */
397
398 path_put(&parent);
10946b02 399 return (-rc);
bcb15891
YS
400
401slashes:
402 rc = !dentry->d_inode ? -ENOENT :
403 S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
10946b02 404 goto exit1;
bcb15891
YS
405} /* vn_remove() */
406EXPORT_SYMBOL(vn_remove);
407
408/* Based on do_rename() from linux/fs/namei.c */
409int
410vn_rename(const char *oldname, const char *newname, int x1)
411{
412 struct dentry *old_dir, *new_dir;
413 struct dentry *old_dentry, *new_dentry;
414 struct dentry *trap;
415 struct path old_parent, new_parent;
416 int rc = 0;
bcb15891
YS
417
418 old_dentry = spl_kern_path_locked(oldname, &old_parent);
10946b02
AX
419 if (IS_ERR(old_dentry)) {
420 rc = PTR_ERR(old_dentry);
421 goto exit;
422 }
bcb15891
YS
423
424 spl_inode_unlock(old_parent.dentry->d_inode);
425
426 new_dentry = spl_kern_path_locked(newname, &new_parent);
10946b02
AX
427 if (IS_ERR(new_dentry)) {
428 rc = PTR_ERR(new_dentry);
429 goto exit2;
430 }
bcb15891
YS
431
432 spl_inode_unlock(new_parent.dentry->d_inode);
433
434 rc = -EXDEV;
435 if (old_parent.mnt != new_parent.mnt)
10946b02 436 goto exit3;
bcb15891
YS
437
438 old_dir = old_parent.dentry;
439 new_dir = new_parent.dentry;
440 trap = lock_rename(new_dir, old_dir);
441
442 /* source should not be ancestor of target */
443 rc = -EINVAL;
444 if (old_dentry == trap)
10946b02 445 goto exit4;
bcb15891
YS
446
447 /* target should not be an ancestor of source */
448 rc = -ENOTEMPTY;
449 if (new_dentry == trap)
10946b02 450 goto exit4;
bcb15891
YS
451
452 /* source must exist */
453 rc = -ENOENT;
454 if (!old_dentry->d_inode)
10946b02 455 goto exit4;
bcb15891
YS
456
457 /* unless the source is a directory trailing slashes give -ENOTDIR */
458 if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
459 rc = -ENOTDIR;
460 if (old_dentry->d_name.name[old_dentry->d_name.len])
10946b02 461 goto exit4;
bcb15891 462 if (new_dentry->d_name.name[new_dentry->d_name.len])
10946b02 463 goto exit4;
bcb15891
YS
464 }
465
9e4fb5c2 466#if defined(HAVE_4ARGS_VFS_RENAME)
bcb15891 467 rc = vfs_rename(old_dir->d_inode, old_dentry,
33a20369 468 new_dir->d_inode, new_dentry);
9e4fb5c2 469#elif defined(HAVE_5ARGS_VFS_RENAME)
33a20369
LG
470 rc = vfs_rename(old_dir->d_inode, old_dentry,
471 new_dir->d_inode, new_dentry, NULL);
9e4fb5c2
LG
472#else
473 rc = vfs_rename(old_dir->d_inode, old_dentry,
474 new_dir->d_inode, new_dentry, NULL, 0);
475#endif
bcb15891
YS
476exit4:
477 unlock_rename(new_dir, old_dir);
478exit3:
479 dput(new_dentry);
480 path_put(&new_parent);
481exit2:
482 dput(old_dentry);
483 path_put(&old_parent);
484exit:
10946b02 485 return (-rc);
0b3cf046 486}
4b171585 487EXPORT_SYMBOL(vn_rename);
0b3cf046 488
4b171585 489int
36e6f861 490vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
0b3cf046 491{
4b171585 492 struct file *fp;
dcd9cb5a 493 struct kstat stat;
4b171585 494 int rc;
495
937879f1 496 ASSERT(vp);
497 ASSERT(vp->v_file);
498 ASSERT(vap);
4b171585 499
e4f1d29f 500 fp = vp->v_file;
4b171585 501
2ea56c1d
AX
502#if defined(HAVE_4ARGS_VFS_GETATTR)
503 rc = vfs_getattr(&fp->f_path, &stat, STATX_BASIC_STATS,
504 AT_STATX_SYNC_AS_STAT);
505#elif defined(HAVE_2ARGS_VFS_GETATTR)
2a305c34
RY
506 rc = vfs_getattr(&fp->f_path, &stat);
507#else
508 rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
509#endif
4b171585 510 if (rc)
10946b02 511 return (-rc);
4b171585 512
4295b530 513 vap->va_type = vn_mode_to_vtype(stat.mode);
4b171585 514 vap->va_mode = stat.mode;
80093b6f
AX
515 vap->va_uid = KUID_TO_SUID(stat.uid);
516 vap->va_gid = KGID_TO_SGID(stat.gid);
4b171585 517 vap->va_fsid = 0;
518 vap->va_nodeid = stat.ino;
519 vap->va_nlink = stat.nlink;
520 vap->va_size = stat.size;
47995fa6 521 vap->va_blksize = stat.blksize;
dcd9cb5a
BB
522 vap->va_atime = stat.atime;
523 vap->va_mtime = stat.mtime;
524 vap->va_ctime = stat.ctime;
4b171585 525 vap->va_rdev = stat.rdev;
dcd9cb5a 526 vap->va_nblocks = stat.blocks;
4b171585 527
10946b02 528 return (0);
0b3cf046 529}
4b171585 530EXPORT_SYMBOL(vn_getattr);
531
2f5d55aa 532int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
4b171585 533{
36e6f861 534 int datasync = 0;
10946b02
AX
535 int error;
536 int fstrans;
36e6f861 537
937879f1 538 ASSERT(vp);
539 ASSERT(vp->v_file);
4b171585 540
36e6f861 541 if (flags & FDSYNC)
542 datasync = 1;
543
10946b02
AX
544 /*
545 * May enter XFS which generates a warning when PF_FSTRANS is set.
546 * To avoid this the flag is cleared over vfs_sync() and then reset.
547 */
2ea56c1d 548 fstrans = __spl_pf_fstrans_check();
10946b02 549 if (fstrans)
2ea56c1d 550 current->flags &= ~(__SPL_PF_FSTRANS);
10946b02
AX
551
552 error = -spl_filp_fsync(vp->v_file, datasync);
553 if (fstrans)
2ea56c1d 554 current->flags |= __SPL_PF_FSTRANS;
10946b02
AX
555
556 return (error);
4b171585 557} /* vn_fsync() */
558EXPORT_SYMBOL(vn_fsync);
af828292 559
bbdc6ae4
ED
560int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
561 offset_t offset, void *x6, void *x7)
562{
563 int error = EOPNOTSUPP;
2ea56c1d
AX
564#ifdef FALLOC_FL_PUNCH_HOLE
565 int fstrans;
566#endif
bbdc6ae4
ED
567
568 if (cmd != F_FREESP || bfp->l_whence != 0)
10946b02 569 return (EOPNOTSUPP);
bbdc6ae4
ED
570
571 ASSERT(vp);
572 ASSERT(vp->v_file);
573 ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);
574
575#ifdef FALLOC_FL_PUNCH_HOLE
2ea56c1d
AX
576 /*
577 * May enter XFS which generates a warning when PF_FSTRANS is set.
578 * To avoid this the flag is cleared over vfs_sync() and then reset.
579 */
580 fstrans = __spl_pf_fstrans_check();
581 if (fstrans)
582 current->flags &= ~(__SPL_PF_FSTRANS);
583
1c7b3eaf
BB
584 /*
585 * When supported by the underlying file system preferentially
586 * use the fallocate() callback to preallocate the space.
587 */
588 error = -spl_filp_fallocate(vp->v_file,
589 FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
590 bfp->l_start, bfp->l_len);
2ea56c1d
AX
591
592 if (fstrans)
593 current->flags |= __SPL_PF_FSTRANS;
594
1c7b3eaf 595 if (error == 0)
10946b02 596 return (0);
bbdc6ae4
ED
597#endif
598
599#ifdef HAVE_INODE_TRUNCATE_RANGE
600 if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
601 vp->v_file->f_dentry->d_inode->i_op &&
602 vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
603 off_t end = bfp->l_start + bfp->l_len;
604 /*
605 * Judging from the code in shmem_truncate_range(),
606 * it seems the kernel expects the end offset to be
607 * inclusive and aligned to the end of a page.
608 */
609 if (end % PAGE_SIZE != 0) {
610 end &= ~(off_t)(PAGE_SIZE - 1);
611 if (end <= bfp->l_start)
10946b02 612 return (0);
bbdc6ae4
ED
613 }
614 --end;
615
616 vp->v_file->f_dentry->d_inode->i_op->truncate_range(
617 vp->v_file->f_dentry->d_inode,
618 bfp->l_start, end
619 );
10946b02 620 return (0);
bbdc6ae4
ED
621 }
622#endif
623
10946b02 624 return (error);
bbdc6ae4
ED
625}
626EXPORT_SYMBOL(vn_space);
627
e4f1d29f 628/* Function must be called while holding the vn_file_lock */
629static file_t *
f6188ddd 630file_find(int fd, struct task_struct *task)
e4f1d29f 631{
632 file_t *fp;
633
937879f1 634 ASSERT(spin_is_locked(&vn_file_lock));
e4f1d29f 635
636 list_for_each_entry(fp, &vn_file_list, f_list) {
f6188ddd 637 if (fd == fp->f_fd && fp->f_task == task) {
937879f1 638 ASSERT(atomic_read(&fp->f_ref) != 0);
e4f1d29f 639 return fp;
640 }
641 }
642
643 return NULL;
644} /* file_find() */
645
646file_t *
647vn_getf(int fd)
648{
649 struct kstat stat;
650 struct file *lfp;
651 file_t *fp;
652 vnode_t *vp;
937879f1 653 int rc = 0;
e4f1d29f 654
f6188ddd
AX
655 if (fd < 0)
656 return (NULL);
657
e4f1d29f 658 /* Already open just take an extra reference */
659 spin_lock(&vn_file_lock);
660
f6188ddd 661 fp = file_find(fd, current);
e4f1d29f 662 if (fp) {
ec06701b
AX
663 lfp = fget(fd);
664 fput(fp->f_file);
665 /*
666 * areleasef() can cause us to see a stale reference when
667 * userspace has reused a file descriptor before areleasef()
668 * has run. fput() the stale reference and replace it. We
669 * retain the original reference count such that the concurrent
670 * areleasef() will decrement its reference and terminate.
671 */
672 if (lfp != fp->f_file) {
673 fp->f_file = lfp;
674 fp->f_vnode->v_file = lfp;
675 }
e4f1d29f 676 atomic_inc(&fp->f_ref);
677 spin_unlock(&vn_file_lock);
10946b02 678 return (fp);
e4f1d29f 679 }
680
681 spin_unlock(&vn_file_lock);
682
683 /* File was not yet opened create the object and setup */
4afaaefa 684 fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
e4f1d29f 685 if (fp == NULL)
10946b02 686 goto out;
e4f1d29f 687
688 mutex_enter(&fp->f_lock);
689
690 fp->f_fd = fd;
763b2f3b 691 fp->f_task = current;
e4f1d29f 692 fp->f_offset = 0;
693 atomic_inc(&fp->f_ref);
694
695 lfp = fget(fd);
696 if (lfp == NULL)
10946b02 697 goto out_mutex;
e4f1d29f 698
699 vp = vn_alloc(KM_SLEEP);
700 if (vp == NULL)
10946b02 701 goto out_fget;
e4f1d29f 702
2ea56c1d
AX
703#if defined(HAVE_4ARGS_VFS_GETATTR)
704 rc = vfs_getattr(&lfp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
705#elif defined(HAVE_2ARGS_VFS_GETATTR)
2a305c34
RY
706 rc = vfs_getattr(&lfp->f_path, &stat);
707#else
708 rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
709#endif
710 if (rc)
10946b02 711 goto out_vnode;
e4f1d29f 712
713 mutex_enter(&vp->v_lock);
4295b530 714 vp->v_type = vn_mode_to_vtype(stat.mode);
e4f1d29f 715 vp->v_file = lfp;
716 mutex_exit(&vp->v_lock);
717
718 fp->f_vnode = vp;
719 fp->f_file = lfp;
720
721 /* Put it on the tracking list */
722 spin_lock(&vn_file_lock);
723 list_add(&fp->f_list, &vn_file_list);
724 spin_unlock(&vn_file_lock);
725
726 mutex_exit(&fp->f_lock);
10946b02 727 return (fp);
e4f1d29f 728
729out_vnode:
e4f1d29f 730 vn_free(vp);
731out_fget:
e4f1d29f 732 fput(lfp);
733out_mutex:
e4f1d29f 734 mutex_exit(&fp->f_lock);
735 kmem_cache_free(vn_file_cache, fp);
736out:
10946b02 737 return (NULL);
e4f1d29f 738} /* getf() */
739EXPORT_SYMBOL(getf);
740
741static void releasef_locked(file_t *fp)
742{
937879f1 743 ASSERT(fp->f_file);
744 ASSERT(fp->f_vnode);
e4f1d29f 745
746 /* Unlinked from list, no refs, safe to free outside mutex */
747 fput(fp->f_file);
748 vn_free(fp->f_vnode);
749
750 kmem_cache_free(vn_file_cache, fp);
751}
752
753void
754vn_releasef(int fd)
f6188ddd
AX
755{
756 areleasef(fd, P_FINFO(current));
757}
758EXPORT_SYMBOL(releasef);
759
760void
761vn_areleasef(int fd, uf_info_t *fip)
e4f1d29f 762{
763 file_t *fp;
f6188ddd
AX
764 struct task_struct *task = (struct task_struct *)fip;
765
766 if (fd < 0)
767 return;
e4f1d29f 768
769 spin_lock(&vn_file_lock);
f6188ddd 770 fp = file_find(fd, task);
e4f1d29f 771 if (fp) {
772 atomic_dec(&fp->f_ref);
773 if (atomic_read(&fp->f_ref) > 0) {
774 spin_unlock(&vn_file_lock);
775 return;
776 }
777
778 list_del(&fp->f_list);
779 releasef_locked(fp);
780 }
781 spin_unlock(&vn_file_lock);
782
783 return;
784} /* releasef() */
f6188ddd
AX
785EXPORT_SYMBOL(areleasef);
786
e4f1d29f 787
10946b02
AX
788static void
789#ifdef HAVE_SET_FS_PWD_WITH_CONST
790vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
791#else
792vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
793#endif /* HAVE_SET_FS_PWD_WITH_CONST */
51a727e9 794{
9b2048c2
BB
795 struct path old_pwd;
796
10946b02 797#ifdef HAVE_FS_STRUCT_SPINLOCK
9b2048c2
BB
798 spin_lock(&fs->lock);
799 old_pwd = fs->pwd;
800 fs->pwd = *path;
801 path_get(path);
802 spin_unlock(&fs->lock);
10946b02 803#else
9b2048c2
BB
804 write_lock(&fs->lock);
805 old_pwd = fs->pwd;
806 fs->pwd = *path;
807 path_get(path);
808 write_unlock(&fs->lock);
10946b02 809#endif /* HAVE_FS_STRUCT_SPINLOCK */
9b2048c2
BB
810
811 if (old_pwd.dentry)
812 path_put(&old_pwd);
51a727e9 813}
51a727e9
BB
814
815int
816vn_set_pwd(const char *filename)
817{
51a727e9 818 struct path path;
82a358d9 819 mm_segment_t saved_fs;
51a727e9 820 int rc;
51a727e9 821
82a358d9
BB
822 /*
823 * user_path_dir() and __user_walk() both expect 'filename' to be
824 * a user space address so we must briefly increase the data segment
825 * size to ensure strncpy_from_user() does not fail with -EFAULT.
826 */
827 saved_fs = get_fs();
828 set_fs(get_ds());
829
51a727e9
BB
830 rc = user_path_dir(filename, &path);
831 if (rc)
10946b02 832 goto out;
51a727e9
BB
833
834 rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
835 if (rc)
10946b02 836 goto dput_and_out;
51a727e9 837
10946b02 838 vn_set_fs_pwd(current->fs, &path);
51a727e9
BB
839
840dput_and_out:
841 path_put(&path);
51a727e9 842out:
82a358d9
BB
843 set_fs(saved_fs);
844
10946b02 845 return (-rc);
51a727e9
BB
846} /* vn_set_pwd() */
847EXPORT_SYMBOL(vn_set_pwd);
848
af828292 849static int
850vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
851{
852 struct vnode *vp = buf;
853
854 mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
855
856 return (0);
857} /* vn_cache_constructor() */
858
859static void
860vn_cache_destructor(void *buf, void *cdrarg)
861{
862 struct vnode *vp = buf;
863
864 mutex_destroy(&vp->v_lock);
865} /* vn_cache_destructor() */
866
e4f1d29f 867static int
868vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
869{
870 file_t *fp = buf;
871
872 atomic_set(&fp->f_ref, 0);
873 mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
4e62fd41 874 INIT_LIST_HEAD(&fp->f_list);
e4f1d29f 875
876 return (0);
877} /* file_cache_constructor() */
878
879static void
880vn_file_cache_destructor(void *buf, void *cdrarg)
881{
882 file_t *fp = buf;
883
884 mutex_destroy(&fp->f_lock);
885} /* vn_file_cache_destructor() */
886
af828292 887int
12ff95ff 888spl_vn_init(void)
af828292 889{
57d86234 890 vn_cache = kmem_cache_create("spl_vn_cache",
891 sizeof(struct vnode), 64,
5d86345d 892 vn_cache_constructor,
893 vn_cache_destructor,
ac9cc135 894 NULL, NULL, NULL, 0);
e4f1d29f 895
896 vn_file_cache = kmem_cache_create("spl_vn_file_cache",
897 sizeof(file_t), 64,
898 vn_file_cache_constructor,
899 vn_file_cache_destructor,
ac9cc135 900 NULL, NULL, NULL, 0);
10946b02 901 return (0);
af828292 902} /* vn_init() */
903
904void
12ff95ff 905spl_vn_fini(void)
af828292 906{
e4f1d29f 907 file_t *fp, *next_fp;
2fb9b26a 908 int leaked = 0;
e4f1d29f 909
910 spin_lock(&vn_file_lock);
911
912 list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
913 list_del(&fp->f_list);
914 releasef_locked(fp);
915 leaked++;
916 }
917
e4f1d29f 918 spin_unlock(&vn_file_lock);
919
920 if (leaked > 0)
10946b02 921 printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);
e4f1d29f 922
2371321e 923 kmem_cache_destroy(vn_file_cache);
2fb9b26a 924 kmem_cache_destroy(vn_cache);
e4f1d29f 925
926 return;
af828292 927} /* vn_fini() */