/* module/spl/spl-vnode.c */
/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Vnode Implementation.
 */
715f6251 26
f7fd6ddd 27#include <sys/cred.h>
4b171585 28#include <sys/vnode.h>
e5b9b344 29#include <sys/kmem_cache.h>
bbdc6ae4 30#include <linux/falloc.h>
52479ecf 31#include <linux/file_compat.h>
937879f1 32
51a727e9 33vnode_t *rootdir = (vnode_t *)0xabcd1234;
4b171585 34EXPORT_SYMBOL(rootdir);
35
7afde631 36static spl_kmem_cache_t *vn_cache;
37static spl_kmem_cache_t *vn_file_cache;
e4f1d29f 38
83c623aa 39static DEFINE_SPINLOCK(vn_file_lock);
e4f1d29f 40static LIST_HEAD(vn_file_list);
af828292 41
4295b530
BB
42vtype_t
43vn_mode_to_vtype(mode_t mode)
4b171585 44{
45 if (S_ISREG(mode))
5461eefe 46 return (VREG);
4b171585 47
48 if (S_ISDIR(mode))
5461eefe 49 return (VDIR);
4b171585 50
51 if (S_ISCHR(mode))
5461eefe 52 return (VCHR);
4b171585 53
54 if (S_ISBLK(mode))
5461eefe 55 return (VBLK);
4b171585 56
57 if (S_ISFIFO(mode))
5461eefe 58 return (VFIFO);
4b171585 59
60 if (S_ISLNK(mode))
5461eefe 61 return (VLNK);
4b171585 62
63 if (S_ISSOCK(mode))
5461eefe 64 return (VSOCK);
4b171585 65
5461eefe 66 return (VNON);
4295b530
BB
67} /* vn_mode_to_vtype() */
68EXPORT_SYMBOL(vn_mode_to_vtype);
69
70mode_t
71vn_vtype_to_mode(vtype_t vtype)
72{
73 if (vtype == VREG)
5461eefe 74 return (S_IFREG);
4295b530
BB
75
76 if (vtype == VDIR)
5461eefe 77 return (S_IFDIR);
4295b530
BB
78
79 if (vtype == VCHR)
5461eefe 80 return (S_IFCHR);
4295b530
BB
81
82 if (vtype == VBLK)
5461eefe 83 return (S_IFBLK);
4295b530
BB
84
85 if (vtype == VFIFO)
5461eefe 86 return (S_IFIFO);
4295b530
BB
87
88 if (vtype == VLNK)
5461eefe 89 return (S_IFLNK);
4295b530
BB
90
91 if (vtype == VSOCK)
5461eefe 92 return (S_IFSOCK);
4295b530 93
5461eefe 94 return (VNON);
4295b530
BB
95} /* vn_vtype_to_mode() */
96EXPORT_SYMBOL(vn_vtype_to_mode);
4b171585 97
af828292 98vnode_t *
99vn_alloc(int flag)
100{
101 vnode_t *vp;
102
103 vp = kmem_cache_alloc(vn_cache, flag);
af828292 104 if (vp != NULL) {
e4f1d29f 105 vp->v_file = NULL;
af828292 106 vp->v_type = 0;
107 }
108
8d9a23e8 109 return (vp);
af828292 110} /* vn_alloc() */
111EXPORT_SYMBOL(vn_alloc);
112
/*
 * Return a vnode obtained from vn_alloc() to the vn_cache kmem cache.
 * The caller must ensure no further references to 'vp' remain.
 */
void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
119
/*
 * Solaris vn_open() emulation: open 'path' and return a new vnode in
 * '*vpp'.  Only UIO_SYSSPACE paths are supported and 'flags' must
 * include FREAD and/or FWRITE.  'x1'/'x2' are ignored compatibility
 * parameters.  Returns 0 on success or a positive errno on failure.
 */
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode, vnode_t **vpp,
    int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	/* Opening an existing file for write implies exclusive access. */
	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/*
	 * Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	/* FCREAT callers expect 'mode' applied verbatim: zero the umask. */
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void) xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

	/* vfs_getattr() call signature varies by kernel version. */
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	/*
	 * Prevent direct memory reclaim from re-entering the filesystem
	 * through this mapping; the saved mask is restored in vn_close().
	 */
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
0b3cf046 188
0b3cf046 189int
af828292 190vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
5461eefe 191 vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
0b3cf046 192{
4b171585 193 char *realpath;
12018327 194 int len, rc;
0b3cf046 195
937879f1 196 ASSERT(vp == rootdir);
0b3cf046 197
12018327 198 len = strlen(path) + 2;
54cccfc2 199 realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
4b171585 200 if (!realpath)
8d9a23e8 201 return (ENOMEM);
0b3cf046 202
5461eefe 203 (void) snprintf(realpath, len, "/%s", path);
4b171585 204 rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
4b171585 205 kfree(realpath);
206
8d9a23e8 207 return (rc);
4b171585 208} /* vn_openat() */
209EXPORT_SYMBOL(vn_openat);
0b3cf046 210
0b3cf046 211int
4b171585 212vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
3673d032 213 uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
0b3cf046 214{
ed19bccf
BB
215 struct file *fp = vp->v_file;
216 loff_t offset = off;
4b171585 217 int rc;
218
937879f1 219 ASSERT(uio == UIO_WRITE || uio == UIO_READ);
937879f1 220 ASSERT(seg == UIO_SYSSPACE);
663e02a1 221 ASSERT((ioflag & ~FAPPEND) == 0);
4b171585 222
663e02a1
RC
223 if (ioflag & FAPPEND)
224 offset = fp->f_pos;
225
4b171585 226 if (uio & UIO_WRITE)
ed19bccf 227 rc = spl_kernel_write(fp, addr, len, &offset);
4b171585 228 else
ed19bccf 229 rc = spl_kernel_read(fp, addr, len, &offset);
4b171585 230
f3989ed3 231 fp->f_pos = offset;
4b171585 232
233 if (rc < 0)
8d9a23e8 234 return (-rc);
0b3cf046 235
4b171585 236 if (residp) {
237 *residp = len - rc;
0b3cf046 238 } else {
4b171585 239 if (rc != len)
8d9a23e8 240 return (EIO);
0b3cf046 241 }
242
8d9a23e8 243 return (0);
4b171585 244} /* vn_rdwr() */
245EXPORT_SYMBOL(vn_rdwr);
246
/*
 * Close a vnode opened with vn_open()/vn_openat(): restore the page
 * cache GFP mask saved at open time, close the underlying struct file,
 * and free the vnode.  Returns 0 on success or a positive errno
 * (filp_close()'s negative return is negated).
 */
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	/* Undo the __GFP_IO/__GFP_FS restriction applied in vn_open(). */
	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
262
5461eefe
BB
263/*
264 * vn_seek() does not actually seek it only performs bounds checking on the
97735c39 265 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
5461eefe
BB
266 * anything more serious.
267 */
97735c39 268int
47995fa6 269vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
97735c39
BB
270{
271 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
272}
273EXPORT_SYMBOL(vn_seek);
274
/*
 * Solaris VOP_GETATTR() emulation: populate 'vap' with the attributes
 * of the file backing 'vp' via vfs_getattr().  'flags', 'x3', and 'x4'
 * are ignored compatibility parameters.  Returns 0 on success or a
 * positive errno.
 */
int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(vap);

	fp = vp->v_file;

	/* vfs_getattr() call signature varies by kernel version. */
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_BASIC_STATS,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc)
		return (-rc);

	/* Translate the Linux kstat into the Solaris-style vattr. */
	vap->va_type = vn_mode_to_vtype(stat.mode);
	vap->va_mode = stat.mode;
	vap->va_uid = KUID_TO_SUID(stat.uid);
	vap->va_gid = KGID_TO_SGID(stat.gid);
	vap->va_fsid = 0;
	vap->va_nodeid = stat.ino;
	vap->va_nlink = stat.nlink;
	vap->va_size = stat.size;
	vap->va_blksize = stat.blksize;
	vap->va_atime = stat.atime;
	vap->va_mtime = stat.mtime;
	vap->va_ctime = stat.ctime;
	vap->va_rdev = stat.rdev;
	vap->va_nblocks = stat.blocks;

	return (0);
}
EXPORT_SYMBOL(vn_getattr);
317
/*
 * Flush a vnode's dirty data to stable storage.  FDSYNC in 'flags'
 * requests fdatasync() semantics (data only, not all metadata).
 * Returns 0 on success or a positive errno.
 */
int
vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;
	int error;
	int fstrans;

	ASSERT(vp);
	ASSERT(vp->v_file);

	if (flags & FDSYNC)
		datasync = 1;

	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fsync call and then
	 * reset.
	 */
	fstrans = __spl_pf_fstrans_check();
	if (fstrans)
		current->flags &= ~(__SPL_PF_FSTRANS);

	/* spl_filp_fsync() returns a negative errno; negate to positive. */
	error = -spl_filp_fsync(vp->v_file, datasync);
	if (fstrans)
		current->flags |= __SPL_PF_FSTRANS;

	return (error);
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
af828292 346
/*
 * Solaris VOP_SPACE() F_FREESP emulation: deallocate (punch a hole in)
 * the byte range [bfp->l_start, bfp->l_start + bfp->l_len) of the file
 * backing 'vp'.  Only cmd == F_FREESP with l_whence == 0 (SEEK_SET) is
 * supported.  Returns 0 on success, EOPNOTSUPP when no supported
 * mechanism exists, or a positive errno.
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
    offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE
	int fstrans;
#endif

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fallocate call and
	 * then reset.
	 */
	fstrans = __spl_pf_fstrans_check();
	if (fstrans)
		current->flags &= ~(__SPL_PF_FSTRANS);

	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to punch the hole.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);

	if (fstrans)
		current->flags |= __SPL_PF_FSTRANS;

	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	/* Fallback: older kernels exposed i_op->truncate_range() instead. */
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode, bfp->l_start, end);

		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);
413
e4f1d29f 414/* Function must be called while holding the vn_file_lock */
415static file_t *
d3c677bc 416file_find(int fd, struct task_struct *task)
e4f1d29f 417{
5461eefe 418 file_t *fp;
e4f1d29f 419
5461eefe 420 list_for_each_entry(fp, &vn_file_list, f_list) {
d3c677bc 421 if (fd == fp->f_fd && fp->f_task == task) {
937879f1 422 ASSERT(atomic_read(&fp->f_ref) != 0);
5461eefe 423 return (fp);
e4f1d29f 424 }
425 }
426
5461eefe 427 return (NULL);
e4f1d29f 428} /* file_find() */
429
/*
 * Solaris getf() emulation: translate a file descriptor owned by the
 * current task into a tracked file_t, creating the file_t/vnode pair on
 * first use and taking an extra reference on subsequent calls.  Returns
 * NULL on failure (bad fd, allocation failure, or getattr failure).
 * Release the reference with releasef()/areleasef().
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		lfp = fget(fd);
		fput(fp->f_file);
		/*
		 * areleasef() can cause us to see a stale reference when
		 * userspace has reused a file descriptor before areleasef()
		 * has run.  fput() the stale reference and replace it.  We
		 * retain the original reference count such that the concurrent
		 * areleasef() will decrement its reference and terminate.
		 */
		if (lfp != fp->f_file) {
			fp->f_file = lfp;
			fp->f_vnode->v_file = lfp;
		}
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

	/* vfs_getattr() call signature varies by kernel version. */
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat, STATX_TYPE,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

	/* Unwind in reverse acquisition order (goto cleanup chain). */
out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
525
/*
 * Free a file_t that has already been unlinked from vn_file_list and
 * whose reference count has reached zero.  Called with vn_file_lock
 * held by all in-tree callers.
 */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
537
/*
 * Solaris releasef() emulation: drop the reference taken by getf() on
 * descriptor 'fd' for the current task.
 */
void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
}
EXPORT_SYMBOL(releasef);
544
545void
546vn_areleasef(int fd, uf_info_t *fip)
e4f1d29f 547{
548 file_t *fp;
d3c677bc 549 struct task_struct *task = (struct task_struct *)fip;
e4f1d29f 550
313b1ea6
RY
551 if (fd < 0)
552 return;
553
e4f1d29f 554 spin_lock(&vn_file_lock);
d3c677bc 555 fp = file_find(fd, task);
e4f1d29f 556 if (fp) {
557 atomic_dec(&fp->f_ref);
558 if (atomic_read(&fp->f_ref) > 0) {
559 spin_unlock(&vn_file_lock);
560 return;
561 }
562
5461eefe 563 list_del(&fp->f_list);
e4f1d29f 564 releasef_locked(fp);
565 }
566 spin_unlock(&vn_file_lock);
e4f1d29f 567} /* releasef() */
d3c677bc
RY
568EXPORT_SYMBOL(areleasef);
569
e4f1d29f 570
/*
 * Replace the working directory of 'fs' with 'path', taking a new
 * reference on 'path' and releasing the reference on the previous pwd.
 * The #ifdefs accommodate both the const-qualified set_fs_pwd()
 * signature and the spinlock- vs rwlock-protected fs_struct layouts.
 */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Drop the reference held on the previous working directory. */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
/*
 * Change the current task's working directory to 'filename' (a kernel
 * space path).  The target must exist, be a directory, and grant
 * MAY_EXEC|MAY_ACCESS.  Returns 0 on success or a positive errno
 * (the negative rc from the VFS helpers is negated on return).
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	/* Always restore the original data segment limit. */
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
631
af828292 632static int
633vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
634{
635 struct vnode *vp = buf;
636
637 mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
638
639 return (0);
640} /* vn_cache_constructor() */
641
/* kmem cache destructor for vnode_t objects: tear down the v_lock. */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
649
e4f1d29f 650static int
651vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
652{
653 file_t *fp = buf;
654
655 atomic_set(&fp->f_ref, 0);
5461eefe 656 mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
4e62fd41 657 INIT_LIST_HEAD(&fp->f_list);
e4f1d29f 658
5461eefe 659 return (0);
378c6ed5 660} /* vn_file_cache_constructor() */
e4f1d29f 661
/* kmem cache destructor for file_t objects: tear down the f_lock. */
static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
669
/*
 * Module init: create the kmem caches backing vnode_t and file_t
 * allocations.  Always returns 0.
 * NOTE(review): the kmem_cache_create() results are not checked for
 * NULL — presumably the SPL kmem implementation cannot fail here;
 * confirm before relying on that.
 */
int
spl_vn_init(void)
{
	vn_cache = kmem_cache_create("spl_vn_cache",
	    sizeof (struct vnode), 64, vn_cache_constructor,
	    vn_cache_destructor, NULL, NULL, NULL, 0);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
	    sizeof (file_t), 64, vn_file_cache_constructor,
	    vn_file_cache_destructor, NULL, NULL, NULL, 0);

	return (0);
} /* spl_vn_init() */
af828292 683
/*
 * Module teardown: force-release any file_t objects still on the
 * tracking list (getf() references that were never released), warn
 * about the leaks, then destroy both kmem caches.
 */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	/* _safe variant: releasef_locked() frees entries as we walk. */
	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);
} /* spl_vn_fini() */