/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/

#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/kmem_cache.h>
#include <linux/falloc.h>
#include <linux/file_compat.h>

vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

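/*
 * Every file_t handle created by vn_getf() is kept on vn_file_list, keyed by
 * file descriptor and owning task, so vn_releasef() can locate it again and
 * spl_vn_fini() can reclaim anything that leaked.  vn_file_lock serializes
 * access to the list.
 */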
static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);

vtype_t
vn_mode_to_vtype(mode_t mode)
{
	if (S_ISREG(mode))
		return VREG;

	if (S_ISDIR(mode))
		return VDIR;

	if (S_ISCHR(mode))
		return VCHR;

	if (S_ISBLK(mode))
		return VBLK;

	if (S_ISFIFO(mode))
		return VFIFO;

	if (S_ISLNK(mode))
		return VLNK;

	if (S_ISSOCK(mode))
		return VSOCK;

	return VNON;
} /* vn_mode_to_vtype() */
EXPORT_SYMBOL(vn_mode_to_vtype);

mode_t
vn_vtype_to_mode(vtype_t vtype)
{
	if (vtype == VREG)
		return S_IFREG;

	if (vtype == VDIR)
		return S_IFDIR;

	if (vtype == VCHR)
		return S_IFCHR;

	if (vtype == VBLK)
		return S_IFBLK;

	if (vtype == VFIFO)
		return S_IFIFO;

	if (vtype == VLNK)
		return S_IFLNK;

	if (vtype == VSOCK)
		return S_IFSOCK;

	return VNON;
} /* vn_vtype_to_mode() */
EXPORT_SYMBOL(vn_vtype_to_mode);

vnode_t *
vn_alloc(int flag)
{
	vnode_t *vp;

	vp = kmem_cache_alloc(vn_cache, flag);
	if (vp != NULL) {
		vp->v_file = NULL;
		vp->v_type = 0;
	}

	return (vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);

void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);

int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/* Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
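	/*
	 * Illustrative example: with the Solaris-style encoding FREAD = 0x1
	 * and FWRITE = 0x2, a caller passing FREAD|FWRITE (0x3) ends up with
	 * 0x2 after the decrement, which is O_RDWR in Linux terms.  The
	 * decrement never borrows past the two low bits because the assertion
	 * above guarantees at least one of them is set.
	 */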
	flags--;

	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);

int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
	char *realpath;
	int len, rc;

	ASSERT(vp == rootdir);

	len = strlen(path) + 2;
	realpath = kmalloc(len, GFP_KERNEL);
	if (!realpath)
		return (ENOMEM);

	(void)snprintf(realpath, len, "/%s", path);
	rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
	kfree(realpath);

	return (rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);

int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
	uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	loff_t offset;
	mm_segment_t saved_fs;
	struct file *fp;
	int rc;

	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);
	ASSERT(x2 == RLIM64_INFINITY);

	fp = vp->v_file;

	offset = off;
	if (ioflag & FAPPEND)
		offset = fp->f_pos;

	/* The writable user data segment must be briefly expanded for this
	 * process so the user space read/write call paths can be used on
	 * memory allocated by the kernel. */
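	/* get_ds() evaluates to KERNEL_DS here, so the address limit checks
	 * performed on behalf of vfs_read()/vfs_write() accept kernel
	 * addresses for the duration of the call. */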
	saved_fs = get_fs();
	set_fs(get_ds());

	if (uio & UIO_WRITE)
		rc = vfs_write(fp, addr, len, &offset);
	else
		rc = vfs_read(fp, addr, len, &offset);

	set_fs(saved_fs);
	fp->f_pos = offset;

	if (rc < 0)
		return (-rc);

	if (residp) {
		*residp = len - rc;
	} else {
		if (rc != len)
			return (EIO);
	}

	return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
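
/*
 * Illustrative (not upstream) sketch of how these wrappers are typically
 * chained by a caller that wants to overwrite a file from a kernel buffer;
 * error handling trimmed for brevity:
 *
 *	vnode_t *vp;
 *	char buf[] = "example";
 *	int rc;
 *
 *	rc = vn_open("/tmp/spl-example", UIO_SYSSPACE,
 *	    FWRITE | FCREAT, 0644, &vp, 0, NULL);
 *	if (rc == 0) {
 *		rc = vn_rdwr(UIO_WRITE, vp, buf, sizeof (buf), 0,
 *		    UIO_SYSSPACE, 0, RLIM64_INFINITY, NULL, NULL);
 *		(void) vn_close(vp, FWRITE, 0, 0, NULL, NULL);
 *	}
 */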

int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);

/* vn_seek() does not actually seek; it only performs bounds checking on
 * the proposed seek.  We perform minimal checking and allow vn_rdwr() to
 * catch anything more serious. */
int
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
EXPORT_SYMBOL(vn_seek);

/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path. If the basename is not "." or "/", it will be an index
 * into the string. While the string should be NULL terminated, the section
 * referring to the basename is not. spl_basename is dual-licensed GPLv2+ and
 * CC0. Anyone wishing to reuse it in another codebase may pick either license.
 */
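/*
 * Illustrative examples (not exhaustive): spl_basename("/foo/bar/", ...)
 * yields a pointer to "bar" within the input and a length of 3, a string of
 * nothing but slashes yields "/" with length 1, and a NULL or empty string
 * yields "." with length 1.
 */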
static void
spl_basename(const char *s, const char **str, int *len)
{
	size_t i, end;

	ASSERT(str);
	ASSERT(len);

	if (!s || !*s) {
		*str = ".";
		*len = 1;
		return;
	}

	i = strlen(s) - 1;

	while (i && s[i--] == '/');

	if (i == 0) {
		*str = "/";
		*len = 1;
		return;
	}

	for (end = i; i; i--) {
		if (s[i] == '/') {
			*str = &s[i+1];
			*len = end - i + 1;
			return;
		}
	}

	*str = s;
	*len = end + 1;
}

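/*
 * spl_kern_path_locked() resolves the parent directory of 'name', takes the
 * parent inode's mutex, and looks up the final path component.  On success
 * the child dentry (which may be negative) is returned with the parent still
 * locked and referenced through *path; the caller must dput() the dentry,
 * unlock the parent inode, and path_put() the parent.  On failure an
 * ERR_PTR() is returned and no lock or reference is held.
 */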
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	spl_inode_lock(parent.dentry->d_inode);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		*path = parent;
	}

	return (dentry);
}

/* Based on do_unlinkat() from linux/fs/namei.c */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
			rc = 0;
			goto slashes;
		}

		inode = dentry->d_inode;
		if (inode) {
			atomic_inc(&inode->i_count);
		} else {
			rc = 0;
			goto slashes;
		}

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
		dput(dentry);
	} else {
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode);	/* truncate the inode here */

	path_put(&parent);
	return (-rc);

slashes:
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);

/* Based on do_rename() from linux/fs/namei.c */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;

	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry)) {
		rc = PTR_ERR(old_dentry);
		goto exit;
	}

	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry)) {
		rc = PTR_ERR(new_dentry);
		goto exit2;
	}

	spl_inode_unlock(new_parent.dentry->d_inode);

	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		goto exit3;

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	trap = lock_rename(new_dir, old_dir);

	/* source should not be ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		goto exit4;

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit4;

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;

	/* unless the source is a directory, trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			goto exit4;
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			goto exit4;
	}

#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	return (-rc);
}
EXPORT_SYMBOL(vn_rename);

int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(vap);

	fp = vp->v_file;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc)
		return (-rc);

	vap->va_type = vn_mode_to_vtype(stat.mode);
	vap->va_mode = stat.mode;
	vap->va_uid = KUID_TO_SUID(stat.uid);
	vap->va_gid = KGID_TO_SGID(stat.gid);
	vap->va_fsid = 0;
	vap->va_nodeid = stat.ino;
	vap->va_nlink = stat.nlink;
	vap->va_size = stat.size;
	vap->va_blksize = stat.blksize;
	vap->va_atime = stat.atime;
	vap->va_mtime = stat.mtime;
	vap->va_ctime = stat.ctime;
	vap->va_rdev = stat.rdev;
	vap->va_nblocks = stat.blocks;

	return (0);
}
EXPORT_SYMBOL(vn_getattr);

int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;

	ASSERT(vp);
	ASSERT(vp->v_file);

	if (flags & FDSYNC)
		datasync = 1;

	return (-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);

int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
	offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * When supported by the underlying file system, preferentially
	 * use the fallocate() callback to punch a hole in the requested
	 * range.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);
	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
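		/*
		 * For example, with a 4 KiB page size a request covering
		 * l_start = 0 and l_len = 10000 gives end = 10000, which is
		 * rounded down to 8192 and then decremented to 8191, the
		 * last byte of the second page.
		 */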
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode,
		    bfp->l_start, end
		);
		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);

/* Function must be called while holding the vn_file_lock */
static file_t *
file_find(int fd)
{
	file_t *fp;

	ASSERT(spin_is_locked(&vn_file_lock));

	list_for_each_entry(fp, &vn_file_list, f_list) {
		if (fd == fp->f_fd && fp->f_task == current) {
			ASSERT(atomic_read(&fp->f_ref) != 0);
			return fp;
		}
	}

	return NULL;
} /* file_find() */

file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	/* Already open, just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd);
	if (fp) {
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened, create the object and set it up */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);

static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}

void
vn_releasef(int fd)
{
	file_t *fp;

	spin_lock(&vn_file_lock);
	fp = file_find(fd);
	if (fp) {
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			spin_unlock(&vn_file_lock);
			return;
		}

		list_del(&fp->f_list);
		releasef_locked(fp);
	}
	spin_unlock(&vn_file_lock);

	return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
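
/*
 * Illustrative (not upstream) pairing of the handle helpers above: a caller
 * that has been handed a user file descriptor, for example from an ioctl,
 * grabs a reference with vn_getf(), performs I/O through the attached vnode,
 * and drops the reference with vn_releasef():
 *
 *	file_t *fp = vn_getf(fd);
 *	if (fp != NULL) {
 *		rc = vn_rdwr(UIO_READ, fp->f_vnode, buf, len,
 *		    fp->f_offset, UIO_SYSSPACE, 0, RLIM64_INFINITY,
 *		    NULL, NULL);
 *		vn_releasef(fd);
 *	}
 */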

static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);

static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */

static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */

static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* vn_file_cache_constructor() */

static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */

int
spl_vn_init(void)
{
	vn_cache = kmem_cache_create("spl_vn_cache",
	    sizeof(struct vnode), 64,
	    vn_cache_constructor,
	    vn_cache_destructor,
	    NULL, NULL, NULL, KMC_KMEM);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
	    sizeof(file_t), 64,
	    vn_file_cache_constructor,
	    vn_file_cache_destructor,
	    NULL, NULL, NULL, KMC_KMEM);
	return (0);
} /* vn_init() */

void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);

	return;
} /* vn_fini() */