/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/kmem_cache.h>
#include <linux/falloc.h>
#include <linux/file_compat.h>

vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);

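/*
 * Map a Linux inode mode (S_IF*) to the equivalent Solaris vnode type;
 * modes with no Solaris equivalent map to VNON.
 */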
vtype_t
vn_mode_to_vtype(mode_t mode)
{
	if (S_ISREG(mode))
		return (VREG);

	if (S_ISDIR(mode))
		return (VDIR);

	if (S_ISCHR(mode))
		return (VCHR);

	if (S_ISBLK(mode))
		return (VBLK);

	if (S_ISFIFO(mode))
		return (VFIFO);

	if (S_ISLNK(mode))
		return (VLNK);

	if (S_ISSOCK(mode))
		return (VSOCK);

	return (VNON);
} /* vn_mode_to_vtype() */
EXPORT_SYMBOL(vn_mode_to_vtype);

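/*
 * Map a Solaris vnode type back to the equivalent Linux inode mode;
 * types with no Linux equivalent fall through to VNON (0).
 */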
mode_t
vn_vtype_to_mode(vtype_t vtype)
{
	if (vtype == VREG)
		return (S_IFREG);

	if (vtype == VDIR)
		return (S_IFDIR);

	if (vtype == VCHR)
		return (S_IFCHR);

	if (vtype == VBLK)
		return (S_IFBLK);

	if (vtype == VFIFO)
		return (S_IFIFO);

	if (vtype == VLNK)
		return (S_IFLNK);

	if (vtype == VSOCK)
		return (S_IFSOCK);

	return (VNON);
} /* vn_vtype_to_mode() */
EXPORT_SYMBOL(vn_vtype_to_mode);

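/*
 * Allocate an empty vnode from the cache; the caller attaches the backing
 * struct file and sets the type.  vn_free() returns it to the cache.
 */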
vnode_t *
vn_alloc(int flag)
{
	vnode_t *vp;

	vp = kmem_cache_alloc(vn_cache, flag);
	if (vp != NULL) {
		vp->v_file = NULL;
		vp->v_type = 0;
	}

	return (vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);

void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);

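/*
 * Open 'path' (which must be a kernel space, UIO_SYSSPACE address) and
 * return a new vnode for it in 'vpp'.  Solaris open flags are remapped
 * for filp_open(), the umask is suppressed for FCREAT opens, and
 * __GFP_IO/__GFP_FS are masked from the file's mapping so memory reclaim
 * against it cannot recurse into the filesystem.  Returns a positive
 * errno on failure.
 *
 * Illustrative sketch only; the path and mode here are hypothetical:
 *
 *	vnode_t *vp;
 *	int rc;
 *
 *	rc = vn_open("/etc/hostid", UIO_SYSSPACE, FREAD, 0644, &vp, 0, NULL);
 *	if (rc == 0)
 *		(void) vn_close(vp, FREAD, 0, 0, NULL, NULL);
 */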
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode, vnode_t **vpp,
    int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/*
	 * Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void) xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);

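/*
 * Open a path relative to a directory vnode.  Only opens relative to
 * rootdir are supported, so the path is made absolute and handed to
 * vn_open().
 */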
int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
    vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
	char *realpath;
	int len, rc;

	ASSERT(vp == rootdir);

	len = strlen(path) + 2;
	realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
	if (!realpath)
		return (ENOMEM);

	(void) snprintf(realpath, len, "/%s", path);
	rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
	kfree(realpath);

	return (rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);

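/*
 * Read from or write to the file backing a vnode at the given offset.
 * With FAPPEND the transfer starts at the file's current position, and
 * the position is updated afterwards in all cases.  When 'residp' is
 * provided the count of untransferred bytes is returned through it;
 * otherwise any short transfer is reported as EIO.
 *
 * Illustrative sketch only; the buffer and length are hypothetical:
 *
 *	char buf[64];
 *	ssize_t resid = 0;
 *	int rc;
 *
 *	rc = vn_rdwr(UIO_READ, vp, buf, sizeof (buf), 0, UIO_SYSSPACE,
 *	    0, RLIM64_INFINITY, NULL, &resid);
 */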
int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
    uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	struct file *fp = vp->v_file;
	loff_t offset = off;
	int rc;

	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);

	if (ioflag & FAPPEND)
		offset = fp->f_pos;

	if (uio & UIO_WRITE)
		rc = spl_kernel_write(fp, addr, len, &offset);
	else
		rc = spl_kernel_read(fp, addr, len, &offset);

	fp->f_pos = offset;

	if (rc < 0)
		return (-rc);

	if (residp) {
		*residp = len - rc;
	} else {
		if (rc != len)
			return (EIO);
	}

	return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);

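/*
 * Close a vnode obtained from vn_open()/vn_openat(): restore the gfp
 * mask saved at open time, close the backing file, and free the vnode.
 */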
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);

/*
 * vn_seek() does not actually seek; it only performs bounds checking on
 * the proposed seek.  We perform minimal checking and allow vn_rdwr() to
 * catch anything more serious.
 */
int
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
EXPORT_SYMBOL(vn_seek);

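/*
 * Translate the Linux attributes of the file backing a vnode into a
 * Solaris vattr_t.  Returns a positive errno if vfs_getattr() fails.
 */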
int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(vap);

	fp = vp->v_file;

#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_BASIC_STATS,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc)
		return (-rc);

	vap->va_type = vn_mode_to_vtype(stat.mode);
	vap->va_mode = stat.mode;
	vap->va_uid = KUID_TO_SUID(stat.uid);
	vap->va_gid = KGID_TO_SGID(stat.gid);
	vap->va_fsid = 0;
	vap->va_nodeid = stat.ino;
	vap->va_nlink = stat.nlink;
	vap->va_size = stat.size;
	vap->va_blksize = stat.blksize;
	vap->va_atime = stat.atime;
	vap->va_mtime = stat.mtime;
	vap->va_ctime = stat.ctime;
	vap->va_rdev = stat.rdev;
	vap->va_nblocks = stat.blocks;

	return (0);
}
EXPORT_SYMBOL(vn_getattr);

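/*
 * Flush a vnode's dirty data to disk; FDSYNC requests fdatasync()
 * semantics.  Returns a positive errno on failure.
 */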
int
vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;
	int error;
	int fstrans;

	ASSERT(vp);
	ASSERT(vp->v_file);

	if (flags & FDSYNC)
		datasync = 1;

	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fsync call and then
	 * reset.
	 */
	fstrans = __spl_pf_fstrans_check();
	if (fstrans)
		current->flags &= ~(__SPL_PF_FSTRANS);

	error = -spl_filp_fsync(vp->v_file, datasync);
	if (fstrans)
		current->flags |= __SPL_PF_FSTRANS;

	return (error);
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);

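/*
 * Free a byte range within a file (F_FREESP), preferring fallocate()
 * hole punching when available and falling back to the inode's
 * truncate_range() callback on older kernels.
 */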
int
vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
    offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE
	int fstrans;
#endif

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over the fallocate call and
	 * then reset.
	 */
	fstrans = __spl_pf_fstrans_check();
	if (fstrans)
		current->flags &= ~(__SPL_PF_FSTRANS);

	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to punch out the range.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);

	if (fstrans)
		current->flags |= __SPL_PF_FSTRANS;

	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode, bfp->l_start, end);

		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);

/* Function must be called while holding the vn_file_lock */
static file_t *
file_find(int fd, struct task_struct *task)
{
	file_t *fp;

	list_for_each_entry(fp, &vn_file_list, f_list) {
		if (fd == fp->f_fd && fp->f_task == task) {
			ASSERT(atomic_read(&fp->f_ref) != 0);
			return (fp);
		}
	}

	return (NULL);
} /* file_find() */

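/*
 * Convert a file descriptor into a tracked file_t, taking a reference.
 * The first getf() of a descriptor allocates the file_t and vnode and
 * adds them to the global tracking list; subsequent calls take an extra
 * reference on the existing entry.  Returns NULL on failure.
 */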
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open, just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		lfp = fget(fd);
		fput(fp->f_file);
		/*
		 * areleasef() can cause us to see a stale reference when
		 * userspace has reused a file descriptor before areleasef()
		 * has run.  fput() the stale reference and replace it.  We
		 * retain the original reference count such that the concurrent
		 * areleasef() will decrement its reference and terminate.
		 */
		if (lfp != fp->f_file) {
			fp->f_file = lfp;
			fp->f_vnode->v_file = lfp;
		}
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened; create the object and set it up */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat, STATX_TYPE,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);

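/*
 * Drop the Linux file reference and free the vnode and file_t for an
 * entry already unlinked from vn_file_list with no remaining references.
 */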
static void
releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}

void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
}
EXPORT_SYMBOL(releasef);

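/*
 * Release one reference on the file_t tracking 'fd' for the given
 * process; the entry is unlinked and freed when the count reaches zero.
 */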
void
vn_areleasef(int fd, uf_info_t *fip)
{
	file_t *fp;
	struct task_struct *task = (struct task_struct *)fip;

	if (fd < 0)
		return;

	spin_lock(&vn_file_lock);
	fp = file_find(fd, task);
	if (fp) {
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			spin_unlock(&vn_file_lock);
			return;
		}

		list_del(&fp->f_list);
		releasef_locked(fp);
	}
	spin_unlock(&vn_file_lock);
} /* releasef() */
EXPORT_SYMBOL(areleasef);

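/*
 * Swap the working directory in an fs_struct, taking a reference on the
 * new path and dropping the old one.  The lock type guarding fs_struct
 * differs across kernel versions, hence the two variants.
 */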
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

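/*
 * Set the current process's working directory to 'filename', a kernel
 * space path.  Returns a positive errno on failure.
 */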
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address, so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);

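/*
 * The kmem cache constructors/destructors below initialize and tear down
 * the locks embedded in cached vnode and file_t objects.
 */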
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */

static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */

static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* vn_file_cache_constructor() */

static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */

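/*
 * Module setup/teardown: create the vnode and file_t caches, and on
 * unload force-release anything still on the tracking list, warning
 * about leaked files.
 */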
int
spl_vn_init(void)
{
	spin_lock_init(&vn_file_lock);

	vn_cache = kmem_cache_create("spl_vn_cache",
	    sizeof (struct vnode), 64, vn_cache_constructor,
	    vn_cache_destructor, NULL, NULL, NULL, 0);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
	    sizeof (file_t), 64, vn_file_cache_constructor,
	    vn_file_cache_destructor, NULL, NULL, NULL, 0);

	return (0);
} /* spl_vn_init() */

void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);
} /* spl_vn_fini() */