]> git.proxmox.com Git - mirror_spl.git/blob - module/spl/spl-vnode.c
Remove kern_path() wrapper
[mirror_spl.git] / module / spl / spl-vnode.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Vnode Implementation.
25 \*****************************************************************************/
26
27 #include <sys/cred.h>
28 #include <sys/vnode.h>
29 #include <linux/falloc.h>
30 #include <spl-debug.h>
31
32 #ifdef SS_DEBUG_SUBSYS
33 #undef SS_DEBUG_SUBSYS
34 #endif
35
36 #define SS_DEBUG_SUBSYS SS_VNODE
37
38 vnode_t *rootdir = (vnode_t *)0xabcd1234;
39 EXPORT_SYMBOL(rootdir);
40
41 static spl_kmem_cache_t *vn_cache;
42 static spl_kmem_cache_t *vn_file_cache;
43
44 static DEFINE_SPINLOCK(vn_file_lock);
45 static LIST_HEAD(vn_file_list);
46
47 vtype_t
48 vn_mode_to_vtype(mode_t mode)
49 {
50 if (S_ISREG(mode))
51 return VREG;
52
53 if (S_ISDIR(mode))
54 return VDIR;
55
56 if (S_ISCHR(mode))
57 return VCHR;
58
59 if (S_ISBLK(mode))
60 return VBLK;
61
62 if (S_ISFIFO(mode))
63 return VFIFO;
64
65 if (S_ISLNK(mode))
66 return VLNK;
67
68 if (S_ISSOCK(mode))
69 return VSOCK;
70
71 if (S_ISCHR(mode))
72 return VCHR;
73
74 return VNON;
75 } /* vn_mode_to_vtype() */
76 EXPORT_SYMBOL(vn_mode_to_vtype);
77
78 mode_t
79 vn_vtype_to_mode(vtype_t vtype)
80 {
81 if (vtype == VREG)
82 return S_IFREG;
83
84 if (vtype == VDIR)
85 return S_IFDIR;
86
87 if (vtype == VCHR)
88 return S_IFCHR;
89
90 if (vtype == VBLK)
91 return S_IFBLK;
92
93 if (vtype == VFIFO)
94 return S_IFIFO;
95
96 if (vtype == VLNK)
97 return S_IFLNK;
98
99 if (vtype == VSOCK)
100 return S_IFSOCK;
101
102 return VNON;
103 } /* vn_vtype_to_mode() */
104 EXPORT_SYMBOL(vn_vtype_to_mode);
105
106 vnode_t *
107 vn_alloc(int flag)
108 {
109 vnode_t *vp;
110 SENTRY;
111
112 vp = kmem_cache_alloc(vn_cache, flag);
113 if (vp != NULL) {
114 vp->v_file = NULL;
115 vp->v_type = 0;
116 }
117
118 SRETURN(vp);
119 } /* vn_alloc() */
120 EXPORT_SYMBOL(vn_alloc);
121
/*
 * Return a vnode previously obtained from vn_alloc() to the cache.
 * The caller is responsible for any v_file cleanup (see vn_close()).
 */
void
vn_free(vnode_t *vp)
{
	SENTRY;
	kmem_cache_free(vn_cache, vp);
	SEXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
130
/*
 * vn_open() - Open a file by pathname, Solaris style.
 *
 *   path  - NULL-terminated kernel-space pathname (seg must be
 *           UIO_SYSSPACE)
 *   flags - Solaris FREAD/FWRITE/FCREAT/... open flags
 *   mode  - creation mode honored exactly when FCREAT is set
 *   vpp   - on success receives a newly allocated vnode wrapping
 *           the open struct file
 *   x1/x2 - unused compatibility arguments
 *
 * Returns 0 on success or a positive errno value.
 */
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
	vnode_t **vpp, int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;
	SENTRY;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	/* Writing without FCREAT gains FEXCL here; NOTE(review):
	 * presumably to fail rather than clobber an existing file —
	 * confirm against the Solaris vn_open() contract. */
	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/* Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	/* Temporarily zero the process umask so filp_open() applies
	 * 'mode' exactly when creating the file; restored below. */
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void)xchg(&current->fs->umask, saved_umask);

	if (IS_ERR(fp))
		SRETURN(-PTR_ERR(fp));

	/* Stat the file so its type can be recorded in vp->v_type */
#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		SRETURN(-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		SRETURN(ENOMEM);
	}

	/* Clear __GFP_IO/__GFP_FS for this mapping so page cache
	 * allocations cannot recurse into the filesystem or issue
	 * I/O; the saved mask is restored by vn_close(). */
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	SRETURN(0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
197
198 int
199 vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
200 vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
201 {
202 char *realpath;
203 int len, rc;
204 SENTRY;
205
206 ASSERT(vp == rootdir);
207
208 len = strlen(path) + 2;
209 realpath = kmalloc(len, GFP_KERNEL);
210 if (!realpath)
211 SRETURN(ENOMEM);
212
213 (void)snprintf(realpath, len, "/%s", path);
214 rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
215 kfree(realpath);
216
217 SRETURN(rc);
218 } /* vn_openat() */
219 EXPORT_SYMBOL(vn_openat);
220
/*
 * vn_rdwr() - Read from or write to the file backing a vnode.
 *
 *   uio    - UIO_READ or UIO_WRITE
 *   addr   - kernel buffer (seg must be UIO_SYSSPACE)
 *   len    - number of bytes to transfer
 *   off    - file offset; ignored when FAPPEND is set, in which
 *            case the transfer starts at the current f_pos
 *   residp - if non-NULL, receives the residual (untransferred)
 *            byte count; if NULL a short transfer is reported as EIO
 *
 * Returns 0 on success or a positive errno value.  The file's
 * f_pos is advanced to reflect the transfer.
 */
int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
	uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
	loff_t offset;
	mm_segment_t saved_fs;
	struct file *fp;
	int rc;
	SENTRY;

	ASSERT(uio == UIO_WRITE || uio == UIO_READ);
	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT((ioflag & ~FAPPEND) == 0);
	ASSERT(x2 == RLIM64_INFINITY);

	fp = vp->v_file;

	offset = off;
	if (ioflag & FAPPEND)
		offset = fp->f_pos;

	/* Writable user data segment must be briefly increased for this
	 * process so we can use the user space read call paths to write
	 * in to memory allocated by the kernel. */
	saved_fs = get_fs();
	set_fs(get_ds());

	if (uio & UIO_WRITE)
		rc = vfs_write(fp, addr, len, &offset);
	else
		rc = vfs_read(fp, addr, len, &offset);

	/* Always restore the saved segment before returning */
	set_fs(saved_fs);
	fp->f_pos = offset;

	if (rc < 0)
		SRETURN(-rc);

	if (residp) {
		*residp = len - rc;
	} else {
		/* No residual requested: a short transfer is an error */
		if (rc != len)
			SRETURN(EIO);
	}

	SRETURN(0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
271
272 int
273 vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
274 {
275 int rc;
276 SENTRY;
277
278 ASSERT(vp);
279 ASSERT(vp->v_file);
280
281 mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
282 rc = filp_close(vp->v_file, 0);
283 vn_free(vp);
284
285 SRETURN(-rc);
286 } /* vn_close() */
287 EXPORT_SYMBOL(vn_close);
288
289 /* vn_seek() does not actually seek it only performs bounds checking on the
290 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
291 * anything more serious. */
292 int
293 vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
294 {
295 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
296 }
297 EXPORT_SYMBOL(vn_seek);
298
/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path. If the basename is not "." or "/", it will be an index
 * into the string. While the string should be NULL terminated, the section
 * referring to the basename is not. spl_basename is dual-licensed GPLv2+ and
 * CC0. Anyone wishing to reuse it in another codebase may pick either license.
 */
static void
spl_basename(const char *s, const char **str, int *len)
{
	size_t i, end;

	ASSERT(str);
	ASSERT(len);

	/* NULL or empty path has basename "." */
	if (!s || !*s) {
		*str = ".";
		*len = 1;
		return;
	}

	i = strlen(s) - 1;

	/* Strip trailing slashes, stopping on the last character of
	 * the basename.  The original loop 'while (i && s[i--] == '/')'
	 * decremented 'i' even for a non-slash character, truncating
	 * the result ("abc/" -> "ab") and misreporting short absolute
	 * paths ("/a" -> "/"). */
	while (i && s[i] == '/')
		i--;

	/* Path consisted entirely of slashes */
	if (i == 0 && s[i] == '/') {
		*str = "/";
		*len = 1;
		return;
	}

	end = i;

	/* Scan backwards for the slash preceding the basename */
	for (; i; i--) {
		if (s[i - 1] == '/') {
			*str = &s[i];
			*len = end - i + 1;
			return;
		}
	}

	/* No directory component at all */
	*str = s;
	*len = end + 1;
}
344
/*
 * spl_kern_path_locked() - Look up the last component of 'name' with
 * its parent directory's inode mutex held.
 *
 * On success the child dentry (which may be negative) is returned
 * with a reference held, '*path' is set to the parent, and the
 * parent inode remains LOCKED: the caller must spl_inode_unlock()
 * it and path_put() the parent.  On failure an ERR_PTR() is
 * returned, any lock taken is released, and '*path' is untouched.
 */
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	/* Resolve everything up to the final component */
	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	/* Lock the parent so the child cannot change under us */
	spl_inode_lock(parent.dentry->d_inode);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		/* Hand the (still locked) parent back to the caller */
		*path = parent;
	}

	return (dentry);
}
380
/* Based on do_unlinkat() from linux/fs/namei.c */
/*
 * vn_remove() - Unlink a regular file by pathname.
 *
 * Only RMFILE with a kernel-space path is supported.  Returns 0 on
 * success or a positive errno value.
 */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;
	SENTRY;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	/* On success the parent directory inode is returned locked */
	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* NOTE(review): do_unlinkat() applies this trailing
		 * character test to the last path component; here the
		 * PARENT's d_name is tested — confirm intent. */
		if (parent.dentry->d_name.name[parent.dentry->d_name.len])
			SGOTO(slashes, rc = 0);

		inode = dentry->d_inode;
		if (inode)
			atomic_inc(&inode->i_count); /* pin across unlink */
		else
			SGOTO(slashes, rc = 0);

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
		dput(dentry);
	} else {
		/* Lookup failed; lock and parent were already released */
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode);    /* truncate the inode here */

	path_put(&parent);
	SRETURN(-rc);

slashes:
	/* Classify why the final component is not unlinkable */
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	SGOTO(exit1, rc);
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
430
/* Based on do_rename() from linux/fs/namei.c */
/*
 * vn_rename() - Rename a file or directory.
 *
 * Both names must resolve on the same mount (-EXDEV otherwise).
 * Returns 0 on success or a positive errno value.
 */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;
	SENTRY;

	/* Each spl_kern_path_locked() success leaves the parent inode
	 * locked; we immediately unlock since lock_rename() below
	 * takes both parent locks in the proper order. */
	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry))
		SGOTO(exit, rc = PTR_ERR(old_dentry));

	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry))
		SGOTO(exit2, rc = PTR_ERR(new_dentry));

	spl_inode_unlock(new_parent.dentry->d_inode);

	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		SGOTO(exit3, rc);

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	/* lock_rename() returns the common ancestor dentry (if any),
	 * used below to detect source/target nesting */
	trap = lock_rename(new_dir, old_dir);

	/* source should not be ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		SGOTO(exit4, rc);

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		SGOTO(exit4, rc);

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		SGOTO(exit4, rc);

	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			SGOTO(exit4, rc);
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			SGOTO(exit4, rc);
	}

	/* Kernel API changed across versions; pick the right arity */
#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	SRETURN(-rc);
}
EXPORT_SYMBOL(vn_rename);
508
509 int
510 vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
511 {
512 struct file *fp;
513 struct kstat stat;
514 int rc;
515 SENTRY;
516
517 ASSERT(vp);
518 ASSERT(vp->v_file);
519 ASSERT(vap);
520
521 fp = vp->v_file;
522
523 #ifdef HAVE_2ARGS_VFS_GETATTR
524 rc = vfs_getattr(&fp->f_path, &stat);
525 #else
526 rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
527 #endif
528 if (rc)
529 SRETURN(-rc);
530
531 vap->va_type = vn_mode_to_vtype(stat.mode);
532 vap->va_mode = stat.mode;
533 vap->va_uid = KUID_TO_SUID(stat.uid);
534 vap->va_gid = KGID_TO_SGID(stat.gid);
535 vap->va_fsid = 0;
536 vap->va_nodeid = stat.ino;
537 vap->va_nlink = stat.nlink;
538 vap->va_size = stat.size;
539 vap->va_blksize = stat.blksize;
540 vap->va_atime = stat.atime;
541 vap->va_mtime = stat.mtime;
542 vap->va_ctime = stat.ctime;
543 vap->va_rdev = stat.rdev;
544 vap->va_nblocks = stat.blocks;
545
546 SRETURN(0);
547 }
548 EXPORT_SYMBOL(vn_getattr);
549
550 int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
551 {
552 int datasync = 0;
553 SENTRY;
554
555 ASSERT(vp);
556 ASSERT(vp->v_file);
557
558 if (flags & FDSYNC)
559 datasync = 1;
560
561 SRETURN(-spl_filp_fsync(vp->v_file, datasync));
562 } /* vn_fsync() */
563 EXPORT_SYMBOL(vn_fsync);
564
/*
 * vn_space() - Free (deallocate) a byte range within a file.
 *
 * Implements the F_FREESP subset of the Solaris VOP_SPACE()
 * interface: the range [l_start, l_start + l_len) is punched out
 * of the file when the underlying filesystem supports it.
 * Returns 0 on success, EOPNOTSUPP when no mechanism is available,
 * or another positive errno value.
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
	offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
	SENTRY;

	/* Only F_FREESP with SEEK_SET-relative ranges is handled */
	if (cmd != F_FREESP || bfp->l_whence != 0)
		SRETURN(EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to preallocate the space.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);
	if (error == 0)
		SRETURN(0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	/* Fall back to the inode truncate_range() operation if present */
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			/* Round down to a page boundary; if nothing
			 * whole-page remains, report success */
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				SRETURN(0);
		}
		--end; /* make the end offset inclusive */

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode,
		    bfp->l_start, end
		);
		SRETURN(0);
	}
#endif

	SRETURN(error);
}
EXPORT_SYMBOL(vn_space);
618
619 /* Function must be called while holding the vn_file_lock */
620 static file_t *
621 file_find(int fd)
622 {
623 file_t *fp;
624
625 ASSERT(spin_is_locked(&vn_file_lock));
626
627 list_for_each_entry(fp, &vn_file_list, f_list) {
628 if (fd == fp->f_fd && fp->f_task == current) {
629 ASSERT(atomic_read(&fp->f_ref) != 0);
630 return fp;
631 }
632 }
633
634 return NULL;
635 } /* file_find() */
636
/*
 * vn_getf() - Solaris getf(): translate a process file descriptor
 * into a referenced file_t.
 *
 * If the fd is already tracked an extra reference is taken and the
 * existing file_t returned.  Otherwise a new file_t/vnode pair is
 * built around the struct file and added to the tracking list.
 * Returns NULL on any failure.  The reference is dropped with
 * vn_releasef().
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;
	SENTRY;

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd);
	if (fp) {
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		SRETURN(fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		SGOTO(out, rc);

	/* Hold f_lock while initializing so file_find() callers cannot
	 * observe a partially constructed entry */
	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	/* Take a reference on the underlying struct file */
	lfp = fget(fd);
	if (lfp == NULL)
		SGOTO(out_mutex, rc);

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		SGOTO(out_fget, rc);

	/* Stat the file to record its vnode type */
#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		SGOTO(out_vnode, rc);

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	SRETURN(fp);

out_vnode:
	/* Unwind in reverse order of acquisition */
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	SRETURN(NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
714
/*
 * Tear down a file_t whose final reference has been dropped and
 * which has been (or is being) removed from the tracking list.
 * The caller must guarantee no other thread can still reach 'fp'.
 */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
726
727 void
728 vn_releasef(int fd)
729 {
730 file_t *fp;
731 SENTRY;
732
733 spin_lock(&vn_file_lock);
734 fp = file_find(fd);
735 if (fp) {
736 atomic_dec(&fp->f_ref);
737 if (atomic_read(&fp->f_ref) > 0) {
738 spin_unlock(&vn_file_lock);
739 SEXIT;
740 return;
741 }
742
743 list_del(&fp->f_list);
744 releasef_locked(fp);
745 }
746 spin_unlock(&vn_file_lock);
747
748 SEXIT;
749 return;
750 } /* releasef() */
751 EXPORT_SYMBOL(releasef);
752
/*
 * vn_set_fs_pwd() - Replace the working directory in 'fs' with
 * 'path', mirroring the kernel's unexported set_fs_pwd().  A
 * reference is taken on the new path and the old pwd's reference
 * is dropped after the lock is released.  The prototype and the
 * fs_struct lock primitive both vary by kernel version.
 */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Drop the old pwd outside the lock; dentry may be NULL on a
	 * freshly initialized fs_struct */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
779
/*
 * vn_set_pwd() - Set the current process working directory to
 * 'filename' (a kernel-space path).  Returns 0 on success or a
 * positive errno value.
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;
	SENTRY;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		SGOTO(out, rc);

	/* Require the directory to be searchable by the caller */
	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		SGOTO(dput_and_out, rc);

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	/* Always restore the saved segment before returning */
	set_fs(saved_fs);

	SRETURN(-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
814
/*
 * Cache constructor: initialize the per-vnode lock when a new
 * object enters the vn_cache.  Always succeeds.
 */
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */
824
/*
 * Cache destructor: tear down the per-vnode lock when an object
 * leaves the vn_cache.
 */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
832
/*
 * Cache constructor: initialize the refcount, lock, and list node
 * of a file_t entering the vn_file_cache.  Always succeeds.
 */
static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* file_cache_constructor() */
844
/*
 * Cache destructor: tear down the file_t lock when an object
 * leaves the vn_file_cache.
 */
static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
852
/*
 * Module init: create the vnode and file_t kmem caches used by this
 * layer.  NOTE(review): the kmem_cache_create() results are not
 * checked; subsequent allocations assume both caches exist.
 */
int
spl_vn_init(void)
{
	SENTRY;
	vn_cache = kmem_cache_create("spl_vn_cache",
				     sizeof(struct vnode), 64,
				     vn_cache_constructor,
				     vn_cache_destructor,
				     NULL, NULL, NULL, KMC_KMEM);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
					  sizeof(file_t), 64,
					  vn_file_cache_constructor,
					  vn_file_cache_destructor,
					  NULL, NULL, NULL, KMC_KMEM);
	SRETURN(0);
} /* vn_init() */
870
871 void
872 spl_vn_fini(void)
873 {
874 file_t *fp, *next_fp;
875 int leaked = 0;
876 SENTRY;
877
878 spin_lock(&vn_file_lock);
879
880 list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
881 list_del(&fp->f_list);
882 releasef_locked(fp);
883 leaked++;
884 }
885
886 spin_unlock(&vn_file_lock);
887
888 if (leaked > 0)
889 SWARN("Warning %d files leaked\n", leaked);
890
891 kmem_cache_destroy(vn_file_cache);
892 kmem_cache_destroy(vn_cache);
893
894 SEXIT;
895 return;
896 } /* vn_fini() */