/*
 * Source: mirror_spl.git (git.proxmox.com), module/spl/spl-vnode.c
 * blob 4c62097dcc574b008d36abd1f9c079f793fd1b3b
 */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Vnode Implementation.
25 \*****************************************************************************/
26
27 #include <sys/cred.h>
28 #include <sys/vnode.h>
29 #include <sys/kmem_cache.h>
30 #include <linux/falloc.h>
31 #include <linux/file_compat.h>
32
/*
 * Solaris code expects a rootdir vnode.  The SPL only compares against it
 * (see vn_openat()); it is presumably never dereferenced, hence the
 * distinctive poison value — TODO confirm no consumer dereferences it.
 */
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

/* Slab caches backing vnode_t and file_t allocations respectively. */
static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

/* vn_file_lock protects vn_file_list, the list of files opened via vn_getf(). */
static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);
41
42 vtype_t
43 vn_mode_to_vtype(mode_t mode)
44 {
45 if (S_ISREG(mode))
46 return VREG;
47
48 if (S_ISDIR(mode))
49 return VDIR;
50
51 if (S_ISCHR(mode))
52 return VCHR;
53
54 if (S_ISBLK(mode))
55 return VBLK;
56
57 if (S_ISFIFO(mode))
58 return VFIFO;
59
60 if (S_ISLNK(mode))
61 return VLNK;
62
63 if (S_ISSOCK(mode))
64 return VSOCK;
65
66 if (S_ISCHR(mode))
67 return VCHR;
68
69 return VNON;
70 } /* vn_mode_to_vtype() */
71 EXPORT_SYMBOL(vn_mode_to_vtype);
72
73 mode_t
74 vn_vtype_to_mode(vtype_t vtype)
75 {
76 if (vtype == VREG)
77 return S_IFREG;
78
79 if (vtype == VDIR)
80 return S_IFDIR;
81
82 if (vtype == VCHR)
83 return S_IFCHR;
84
85 if (vtype == VBLK)
86 return S_IFBLK;
87
88 if (vtype == VFIFO)
89 return S_IFIFO;
90
91 if (vtype == VLNK)
92 return S_IFLNK;
93
94 if (vtype == VSOCK)
95 return S_IFSOCK;
96
97 return VNON;
98 } /* vn_vtype_to_mode() */
99 EXPORT_SYMBOL(vn_vtype_to_mode);
100
101 vnode_t *
102 vn_alloc(int flag)
103 {
104 vnode_t *vp;
105
106 vp = kmem_cache_alloc(vn_cache, flag);
107 if (vp != NULL) {
108 vp->v_file = NULL;
109 vp->v_type = 0;
110 }
111
112 return (vp);
113 } /* vn_alloc() */
114 EXPORT_SYMBOL(vn_alloc);
115
116 void
117 vn_free(vnode_t *vp)
118 {
119 kmem_cache_free(vn_cache, vp);
120 } /* vn_free() */
121 EXPORT_SYMBOL(vn_free);
122
/*
 * vn_open() - Open a file by path and return a newly allocated vnode.
 *
 * 'flags' uses Solaris FREAD/FWRITE/FCREAT/FEXCL semantics and 'seg'
 * must be UIO_SYSSPACE.  On success 0 is returned and *vpp points to a
 * vnode which must be released with vn_close(); on failure a positive
 * errno is returned (Solaris convention) and *vpp remains NULL.  The
 * x1/x2 arguments exist only for Solaris API compatibility.
 */
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	/*
	 * NOTE(review): FEXCL is forced for write opens which do not
	 * create — presumably to mimic Solaris behavior; after the
	 * remap below O_EXCL without O_CREAT is largely a no-op for
	 * the Linux VFS.  TODO confirm intent.
	 */
	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/* Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	/*
	 * Temporarily clear the process umask so a created file gets
	 * exactly the requested 'mode'; the umask is restored below.
	 */
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	/*
	 * Disallow I/O and FS re-entry on this mapping's allocations;
	 * the original mask is saved so vn_close() can restore it.
	 */
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
188
189 int
190 vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
191 vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
192 {
193 char *realpath;
194 int len, rc;
195
196 ASSERT(vp == rootdir);
197
198 len = strlen(path) + 2;
199 realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
200 if (!realpath)
201 return (ENOMEM);
202
203 (void)snprintf(realpath, len, "/%s", path);
204 rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
205 kfree(realpath);
206
207 return (rc);
208 } /* vn_openat() */
209 EXPORT_SYMBOL(vn_openat);
210
/*
 * vn_rdwr() - Read from or write to an open vnode at a given offset.
 *
 * 'uio' selects UIO_READ or UIO_WRITE, 'seg' must be UIO_SYSSPACE and
 * the only supported ioflag is FAPPEND (start at the file's current
 * position).  On success 0 is returned and, when 'residp' is non-NULL,
 * *residp holds the number of bytes NOT transferred; with a NULL
 * 'residp' a short transfer returns EIO.  Failures return a positive
 * errno (Solaris convention).  Side effect: fp->f_pos is advanced to
 * the end of the transfer.
 */
int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
	uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	loff_t offset;
	mm_segment_t saved_fs;
	struct file *fp;
	int rc;

	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);
	ASSERT(x2 == RLIM64_INFINITY);

	fp = vp->v_file;

	offset = off;
	if (ioflag & FAPPEND)
		offset = fp->f_pos;

	/* Writable user data segment must be briefly increased for this
	 * process so we can use the user space read call paths to write
	 * in to memory allocated by the kernel. */
	saved_fs = get_fs();
	set_fs(get_ds());

	/*
	 * NOTE(review): 'uio & UIO_WRITE' assumes UIO_WRITE has a
	 * distinct bit set relative to UIO_READ — confirm against the
	 * uio_rw_t definition.
	 */
	if (uio & UIO_WRITE)
		rc = vfs_write(fp, addr, len, &offset);
	else
		rc = vfs_read(fp, addr, len, &offset);

	set_fs(saved_fs);
	fp->f_pos = offset;

	if (rc < 0)
		return (-rc);

	if (residp) {
		*residp = len - rc;
	} else {
		if (rc != len)
			return (EIO);
	}

	return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
260
261 int
262 vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
263 {
264 int rc;
265
266 ASSERT(vp);
267 ASSERT(vp->v_file);
268
269 mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
270 rc = filp_close(vp->v_file, 0);
271 vn_free(vp);
272
273 return (-rc);
274 } /* vn_close() */
275 EXPORT_SYMBOL(vn_close);
276
277 /* vn_seek() does not actually seek it only performs bounds checking on the
278 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
279 * anything more serious. */
280 int
281 vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
282 {
283 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
284 }
285 EXPORT_SYMBOL(vn_seek);
286
287 /*
288 * spl_basename() takes a NULL-terminated string s as input containing a path.
289 * It returns a char pointer to a string and a length that describe the
290 * basename of the path. If the basename is not "." or "/", it will be an index
291 * into the string. While the string should be NULL terminated, the section
292 * referring to the basename is not. spl_basename is dual-licensed GPLv2+ and
293 * CC0. Anyone wishing to reuse it in another codebase may pick either license.
294 */
295 static void
296 spl_basename(const char *s, const char **str, int *len)
297 {
298 size_t i, end;
299
300 ASSERT(str);
301 ASSERT(len);
302
303 if (!s || !*s) {
304 *str = ".";
305 *len = 1;
306 return;
307 }
308
309 i = strlen(s) - 1;
310
311 while (i && s[i--] == '/');
312
313 if (i == 0) {
314 *str = "/";
315 *len = 1;
316 return;
317 }
318
319 end = i;
320
321 for (end = i; i; i--) {
322 if (s[i] == '/') {
323 *str = &s[i+1];
324 *len = end - i + 1;
325 return;
326 }
327 }
328
329 *str = s;
330 *len = end + 1;
331 }
332
/*
 * spl_kern_path_locked() - Resolve 'name' to a dentry with its parent
 * directory inode locked.
 *
 * On success the parent path is stored in *path with a reference held
 * and the parent inode mutex held; the caller must spl_inode_unlock()
 * the parent inode and path_put() the parent when done, and dput() the
 * returned dentry.  On failure an ERR_PTR() (negative errno) is
 * returned and no lock or reference is retained.  Basenames of "." and
 * ".." are rejected with -EACCES.
 */
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	/* Lock the parent so the lookup result cannot change under us. */
	spl_inode_lock(parent.dentry->d_inode);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		/* Lookup failed: drop the lock and parent reference. */
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		/* Success: hand the (still locked) parent to the caller. */
		*path = parent;
	}

	return (dentry);
}
368
/* Based on do_unlinkat() from linux/fs/namei.c */
/*
 * vn_remove() - Unlink the file at 'path'.
 *
 * 'seg' must be UIO_SYSSPACE and 'flags' must be RMFILE.  Returns 0 on
 * success or a positive errno (Solaris convention).  Directories and
 * paths with trailing slashes are rejected via the 'slashes' path.
 */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	/* On success the parent inode is returned locked; see unlock below. */
	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/*
		 * NOTE(review): do_unlinkat() applies this trailing-
		 * character test to the last path component, not to the
		 * parent's d_name as done here — confirm this is
		 * intentional.
		 */
		if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
			rc = 0;
			goto slashes;
		}

		inode = dentry->d_inode;
		if (inode) {
			/* Hold the inode so it outlives the unlink. */
			atomic_inc(&inode->i_count);
		} else {
			rc = 0;
			goto slashes;
		}

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
		dput(dentry);
	} else {
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode);    /* truncate the inode here */

	path_put(&parent);
	return (-rc);

slashes:
	/* Map a trailing-slash / missing-inode lookup to the right errno. */
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
421
/* Based on do_rename() from linux/fs/namei.c */
/*
 * vn_rename() - Rename 'oldname' to 'newname' on the same mount.
 *
 * Returns 0 on success or a positive errno (Solaris convention):
 * EXDEV for a cross-mount rename, EINVAL/ENOTEMPTY when one path is an
 * ancestor of the other, ENOENT when the source is missing, ENOTDIR
 * for trailing slashes on non-directories.  'x1' is unused (Solaris
 * API compatibility).
 */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;

	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry)) {
		rc = PTR_ERR(old_dentry);
		goto exit;
	}

	/* Drop the parent inode lock; lock_rename() retakes both below. */
	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry)) {
		/* spl_kern_path_locked() cleaned up the new side itself. */
		rc = PTR_ERR(new_dentry);
		goto exit2;
	}

	spl_inode_unlock(new_parent.dentry->d_inode);

	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		goto exit3;

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	/* 'trap' is the common ancestor; matching it means a cycle. */
	trap = lock_rename(new_dir, old_dir);

	/* source should not be ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		goto exit4;

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit4;

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;

	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			goto exit4;
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			goto exit4;
	}

#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	return (-rc);
}
EXPORT_SYMBOL(vn_rename);
502
503 int
504 vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
505 {
506 struct file *fp;
507 struct kstat stat;
508 int rc;
509
510 ASSERT(vp);
511 ASSERT(vp->v_file);
512 ASSERT(vap);
513
514 fp = vp->v_file;
515
516 #ifdef HAVE_2ARGS_VFS_GETATTR
517 rc = vfs_getattr(&fp->f_path, &stat);
518 #else
519 rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
520 #endif
521 if (rc)
522 return (-rc);
523
524 vap->va_type = vn_mode_to_vtype(stat.mode);
525 vap->va_mode = stat.mode;
526 vap->va_uid = KUID_TO_SUID(stat.uid);
527 vap->va_gid = KGID_TO_SGID(stat.gid);
528 vap->va_fsid = 0;
529 vap->va_nodeid = stat.ino;
530 vap->va_nlink = stat.nlink;
531 vap->va_size = stat.size;
532 vap->va_blksize = stat.blksize;
533 vap->va_atime = stat.atime;
534 vap->va_mtime = stat.mtime;
535 vap->va_ctime = stat.ctime;
536 vap->va_rdev = stat.rdev;
537 vap->va_nblocks = stat.blocks;
538
539 return (0);
540 }
541 EXPORT_SYMBOL(vn_getattr);
542
543 int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
544 {
545 int datasync = 0;
546 int error;
547 int fstrans;
548
549 ASSERT(vp);
550 ASSERT(vp->v_file);
551
552 if (flags & FDSYNC)
553 datasync = 1;
554
555 /*
556 * May enter XFS which generates a warning when PF_FSTRANS is set.
557 * To avoid this the flag is cleared over vfs_sync() and then reset.
558 */
559 fstrans = spl_fstrans_check();
560 if (fstrans)
561 current->flags &= ~(PF_FSTRANS);
562
563 error = -spl_filp_fsync(vp->v_file, datasync);
564 if (fstrans)
565 current->flags |= PF_FSTRANS;
566
567 return (error);
568 } /* vn_fsync() */
569 EXPORT_SYMBOL(vn_fsync);
570
/*
 * vn_space() - Deallocate (punch a hole in) a byte range of a vnode.
 *
 * Only the F_FREESP command with an absolute (l_whence == 0) range is
 * supported.  Returns 0 on success, EOPNOTSUPP when neither fallocate()
 * nor truncate_range() support is available, otherwise a positive
 * errno (Solaris convention).  x6 and x7 are unused.
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
	offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to punch a hole in the range
	 * (FALLOC_FL_KEEP_SIZE prevents the file from being shortened).
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);
	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	/* Fall back to the inode truncate_range() operation if present. */
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode,
		    bfp->l_start, end
		);
		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);
623
624 /* Function must be called while holding the vn_file_lock */
625 static file_t *
626 file_find(int fd)
627 {
628 file_t *fp;
629
630 ASSERT(spin_is_locked(&vn_file_lock));
631
632 list_for_each_entry(fp, &vn_file_list, f_list) {
633 if (fd == fp->f_fd && fp->f_task == current) {
634 ASSERT(atomic_read(&fp->f_ref) != 0);
635 return fp;
636 }
637 }
638
639 return NULL;
640 } /* file_find() */
641
/*
 * vn_getf() - Get a tracked file_t for an already-open file descriptor.
 *
 * If the fd is already tracked for the current task an extra reference
 * is taken and the existing file_t returned.  Otherwise a new file_t
 * is built around fget(fd), given a vnode, and added to vn_file_list.
 * Returns NULL on any failure.  Release with vn_releasef().
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd);
	if (fp) {
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	/*
	 * NOTE(review): f_ref is bumped before setup completes; the error
	 * paths below free the object without decrementing — presumably
	 * safe because the SPL slab runs the constructor (which zeroes
	 * f_ref) on every allocation.  TODO confirm.
	 */
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

	/* Error unwinding: release in reverse order of acquisition. */
out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
721
/*
 * releasef_locked() - Tear down a file_t whose refcount has reached
 * zero and which has already been unlinked from vn_file_list.  Despite
 * the comment below both callers currently invoke this while holding
 * vn_file_lock.
 */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
733
734 void
735 vn_releasef(int fd)
736 {
737 file_t *fp;
738
739 if (fd < 0)
740 return;
741
742 spin_lock(&vn_file_lock);
743 fp = file_find(fd);
744 if (fp) {
745 atomic_dec(&fp->f_ref);
746 if (atomic_read(&fp->f_ref) > 0) {
747 spin_unlock(&vn_file_lock);
748 return;
749 }
750
751 list_del(&fp->f_list);
752 releasef_locked(fp);
753 }
754 spin_unlock(&vn_file_lock);
755
756 return;
757 } /* releasef() */
758 EXPORT_SYMBOL(releasef);
759
/*
 * vn_set_fs_pwd() - Replace the working directory in 'fs' with 'path'.
 *
 * Takes a reference on the new path under the fs_struct lock (a
 * spinlock or rwlock depending on kernel version) and drops the
 * reference on the old pwd afterwards, outside the lock.
 */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Release the previous pwd reference outside the lock. */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
786
/*
 * vn_set_pwd() - Change the current task's working directory to
 * 'filename' (a kernel-space path).  Returns 0 on success or a
 * positive errno (Solaris convention).
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	/* Mirror chdir(2): require search permission on the directory. */
	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
820
821 static int
822 vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
823 {
824 struct vnode *vp = buf;
825
826 mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
827
828 return (0);
829 } /* vn_cache_constructor() */
830
831 static void
832 vn_cache_destructor(void *buf, void *cdrarg)
833 {
834 struct vnode *vp = buf;
835
836 mutex_destroy(&vp->v_lock);
837 } /* vn_cache_destructor() */
838
839 static int
840 vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
841 {
842 file_t *fp = buf;
843
844 atomic_set(&fp->f_ref, 0);
845 mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
846 INIT_LIST_HEAD(&fp->f_list);
847
848 return (0);
849 } /* file_cache_constructor() */
850
851 static void
852 vn_file_cache_destructor(void *buf, void *cdrarg)
853 {
854 file_t *fp = buf;
855
856 mutex_destroy(&fp->f_lock);
857 } /* vn_file_cache_destructor() */
858
859 int
860 spl_vn_init(void)
861 {
862 vn_cache = kmem_cache_create("spl_vn_cache",
863 sizeof(struct vnode), 64,
864 vn_cache_constructor,
865 vn_cache_destructor,
866 NULL, NULL, NULL, KMC_KMEM);
867
868 vn_file_cache = kmem_cache_create("spl_vn_file_cache",
869 sizeof(file_t), 64,
870 vn_file_cache_constructor,
871 vn_file_cache_destructor,
872 NULL, NULL, NULL, KMC_KMEM);
873 return (0);
874 } /* vn_init() */
875
876 void
877 spl_vn_fini(void)
878 {
879 file_t *fp, *next_fp;
880 int leaked = 0;
881
882 spin_lock(&vn_file_lock);
883
884 list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
885 list_del(&fp->f_list);
886 releasef_locked(fp);
887 leaked++;
888 }
889
890 spin_unlock(&vn_file_lock);
891
892 if (leaked > 0)
893 printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);
894
895 kmem_cache_destroy(vn_file_cache);
896 kmem_cache_destroy(vn_cache);
897
898 return;
899 } /* vn_fini() */