]> git.proxmox.com Git - mirror_spl.git/blob - module/spl/spl-vnode.c
Prepare SPL repo to merge with ZFS repo
[mirror_spl.git] / module / spl / spl-vnode.c
1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * Solaris Porting Layer (SPL) Vnode Implementation.
25 */
26
27 #include <sys/cred.h>
28 #include <sys/vnode.h>
29 #include <sys/kmem_cache.h>
30 #include <linux/falloc.h>
31 #include <linux/fs.h>
32 #include <linux/uaccess.h>
33 #ifdef HAVE_FDTABLE_HEADER
34 #include <linux/fdtable.h>
35 #endif
36
37 vnode_t *rootdir = (vnode_t *)0xabcd1234;
38 EXPORT_SYMBOL(rootdir);
39
40 static spl_kmem_cache_t *vn_cache;
41 static spl_kmem_cache_t *vn_file_cache;
42
43 static DEFINE_SPINLOCK(vn_file_lock);
44 static LIST_HEAD(vn_file_list);
45
46 static int
47 spl_filp_fallocate(struct file *fp, int mode, loff_t offset, loff_t len)
48 {
49 int error = -EOPNOTSUPP;
50
51 #ifdef HAVE_FILE_FALLOCATE
52 if (fp->f_op->fallocate)
53 error = fp->f_op->fallocate(fp, mode, offset, len);
54 #else
55 #ifdef HAVE_INODE_FALLOCATE
56 if (fp->f_dentry && fp->f_dentry->d_inode &&
57 fp->f_dentry->d_inode->i_op->fallocate)
58 error = fp->f_dentry->d_inode->i_op->fallocate(
59 fp->f_dentry->d_inode, mode, offset, len);
60 #endif /* HAVE_INODE_FALLOCATE */
61 #endif /* HAVE_FILE_FALLOCATE */
62
63 return (error);
64 }
65
/*
 * Flush dirty data for the open file 'fp' via vfs_fsync().  'sync' is
 * passed through as the datasync argument.  Returns vfs_fsync()'s
 * result (0 or a negative errno); vn_fsync() negates it to the
 * positive-errno convention.
 */
static int
spl_filp_fsync(struct file *fp, int sync)
{
#ifdef HAVE_2ARGS_VFS_FSYNC
	return (vfs_fsync(fp, sync));
#else
	/* Older kernels require the dentry to be passed explicitly. */
	return (vfs_fsync(fp, (fp)->f_dentry, sync));
#endif /* HAVE_2ARGS_VFS_FSYNC */
}
75
/*
 * Write 'count' bytes from the kernel buffer 'buf' to 'file' at *pos,
 * advancing *pos.  Uses kernel_write() when the kernel provides the
 * loff_t *pos variant; otherwise the address limit is temporarily
 * raised so vfs_write() accepts a kernel-space buffer.  Returns the
 * number of bytes written or a negative errno.
 */
static ssize_t
spl_kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
{
#if defined(HAVE_KERNEL_WRITE_PPOS)
	return (kernel_write(file, buf, count, pos));
#else
	mm_segment_t saved_fs;
	ssize_t ret;

	/* Let vfs_write() treat a kernel address as valid "user" memory. */
	saved_fs = get_fs();
	set_fs(get_ds());

	ret = vfs_write(file, (__force const char __user *)buf, count, pos);

	/* Restore the caller's original address limit. */
	set_fs(saved_fs);

	return (ret);
#endif
}
95
/*
 * Read up to 'count' bytes from 'file' at *pos into the kernel buffer
 * 'buf', advancing *pos.  Uses kernel_read() when the kernel provides
 * the loff_t *pos variant; otherwise the address limit is temporarily
 * raised so vfs_read() accepts a kernel-space buffer.  Returns the
 * number of bytes read or a negative errno.
 */
static ssize_t
spl_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
#if defined(HAVE_KERNEL_READ_PPOS)
	return (kernel_read(file, buf, count, pos));
#else
	mm_segment_t saved_fs;
	ssize_t ret;

	/* Let vfs_read() treat a kernel address as valid "user" memory. */
	saved_fs = get_fs();
	set_fs(get_ds());

	ret = vfs_read(file, (void __user *)buf, count, pos);

	/* Restore the caller's original address limit. */
	set_fs(saved_fs);

	return (ret);
#endif
}
115
116 vtype_t
117 vn_mode_to_vtype(mode_t mode)
118 {
119 if (S_ISREG(mode))
120 return (VREG);
121
122 if (S_ISDIR(mode))
123 return (VDIR);
124
125 if (S_ISCHR(mode))
126 return (VCHR);
127
128 if (S_ISBLK(mode))
129 return (VBLK);
130
131 if (S_ISFIFO(mode))
132 return (VFIFO);
133
134 if (S_ISLNK(mode))
135 return (VLNK);
136
137 if (S_ISSOCK(mode))
138 return (VSOCK);
139
140 return (VNON);
141 } /* vn_mode_to_vtype() */
142 EXPORT_SYMBOL(vn_mode_to_vtype);
143
144 mode_t
145 vn_vtype_to_mode(vtype_t vtype)
146 {
147 if (vtype == VREG)
148 return (S_IFREG);
149
150 if (vtype == VDIR)
151 return (S_IFDIR);
152
153 if (vtype == VCHR)
154 return (S_IFCHR);
155
156 if (vtype == VBLK)
157 return (S_IFBLK);
158
159 if (vtype == VFIFO)
160 return (S_IFIFO);
161
162 if (vtype == VLNK)
163 return (S_IFLNK);
164
165 if (vtype == VSOCK)
166 return (S_IFSOCK);
167
168 return (VNON);
169 } /* vn_vtype_to_mode() */
170 EXPORT_SYMBOL(vn_vtype_to_mode);
171
172 vnode_t *
173 vn_alloc(int flag)
174 {
175 vnode_t *vp;
176
177 vp = kmem_cache_alloc(vn_cache, flag);
178 if (vp != NULL) {
179 vp->v_file = NULL;
180 vp->v_type = 0;
181 }
182
183 return (vp);
184 } /* vn_alloc() */
185 EXPORT_SYMBOL(vn_alloc);
186
/*
 * Return a vnode obtained from vn_alloc() to the vnode cache.  Any
 * file reference held in v_file is NOT released here; callers (e.g.
 * vn_close()) drop it first.
 */
void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
193
/*
 * Open the file at 'path' and return a new vnode referencing it.
 *
 * 'seg' must be UIO_SYSSPACE and 'flags' must include FREAD and/or
 * FWRITE; x1/x2 are unused Solaris compatibility arguments.  On
 * success 0 is returned and *vpp holds the new vnode; on failure a
 * positive errno is returned and *vpp is NULL.
 */
int
vn_open(const char *path, uio_seg_t seg, int flags, int mode, vnode_t **vpp,
    int x1, void *x2)
{
	struct file *fp;
	struct kstat stat;
	int rc, saved_umask = 0;
	gfp_t saved_gfp;
	vnode_t *vp;

	ASSERT(flags & (FWRITE | FREAD));
	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(vpp);
	*vpp = NULL;

	/*
	 * Force FEXCL for write opens which do not create the file.
	 * NOTE(review): presumably to mirror Solaris vn_open() semantics
	 * for opening existing files -- confirm against consumers.
	 */
	if (!(flags & FCREAT) && (flags & FWRITE))
		flags |= FEXCL;

	/*
	 * Note for filp_open() the two low bits must be remapped to mean:
	 * 01 - read-only  -> 00 read-only
	 * 10 - write-only -> 01 write-only
	 * 11 - read-write -> 10 read-write
	 */
	flags--;

	/*
	 * Temporarily zero the process umask so the requested 'mode' is
	 * applied exactly when the file is created; restored below.
	 */
	if (flags & FCREAT)
		saved_umask = xchg(&current->fs->umask, 0);

	fp = filp_open(path, flags, mode);

	if (flags & FCREAT)
		(void) xchg(&current->fs->umask, saved_umask);

	/* filp_open() returns a negative errno; convert to positive. */
	if (IS_ERR(fp))
		return (-PTR_ERR(fp));

	/* Stat the file to determine its type for v_type below. */
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc) {
		filp_close(fp, 0);
		return (-rc);
	}

	vp = vn_alloc(KM_SLEEP);
	if (!vp) {
		filp_close(fp, 0);
		return (ENOMEM);
	}

	/*
	 * Forbid __GFP_IO/__GFP_FS allocations against this mapping so
	 * reclaim cannot recurse into the filesystem; the saved mask is
	 * restored by vn_close().
	 */
	saved_gfp = mapping_gfp_mask(fp->f_mapping);
	mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = fp;
	vp->v_gfp_mask = saved_gfp;
	*vpp = vp;
	mutex_exit(&vp->v_lock);

	return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
262
263 int
264 vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
265 vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
266 {
267 char *realpath;
268 int len, rc;
269
270 ASSERT(vp == rootdir);
271
272 len = strlen(path) + 2;
273 realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));
274 if (!realpath)
275 return (ENOMEM);
276
277 (void) snprintf(realpath, len, "/%s", path);
278 rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
279 kfree(realpath);
280
281 return (rc);
282 } /* vn_openat() */
283 EXPORT_SYMBOL(vn_openat);
284
285 int
286 vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
287 uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
288 {
289 struct file *fp = vp->v_file;
290 loff_t offset = off;
291 int rc;
292
293 ASSERT(uio == UIO_WRITE || uio == UIO_READ);
294 ASSERT(seg == UIO_SYSSPACE);
295 ASSERT((ioflag & ~FAPPEND) == 0);
296
297 if (ioflag & FAPPEND)
298 offset = fp->f_pos;
299
300 if (uio & UIO_WRITE)
301 rc = spl_kernel_write(fp, addr, len, &offset);
302 else
303 rc = spl_kernel_read(fp, addr, len, &offset);
304
305 fp->f_pos = offset;
306
307 if (rc < 0)
308 return (-rc);
309
310 if (residp) {
311 *residp = len - rc;
312 } else {
313 if (rc != len)
314 return (EIO);
315 }
316
317 return (0);
318 } /* vn_rdwr() */
319 EXPORT_SYMBOL(vn_rdwr);
320
/*
 * Close a vnode opened with vn_open()/vn_openat() and free it.
 * Returns 0 on success or a positive errno from filp_close().
 */
int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);

	/* Undo the __GFP_IO/__GFP_FS restriction applied by vn_open(). */
	mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
	rc = filp_close(vp->v_file, 0);
	vn_free(vp);

	/* filp_close() returns a negative errno; convert to positive. */
	return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
336
337 /*
338 * vn_seek() does not actually seek it only performs bounds checking on the
339 * proposed seek. We perform minimal checking and allow vn_rdwr() to catch
340 * anything more serious.
341 */
342 int
343 vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
344 {
345 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
346 }
347 EXPORT_SYMBOL(vn_seek);
348
/*
 * Fill in 'vap' with the attributes of the file backing 'vp' by
 * querying the VFS and translating the Linux kstat fields to their
 * Solaris vattr equivalents.  'flags', x3, and x4 are unused
 * compatibility arguments.  Returns 0 on success or a positive errno.
 */
int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	struct file *fp;
	struct kstat stat;
	int rc;

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(vap);

	fp = vp->v_file;

	/* Select the vfs_getattr() variant supported by this kernel. */
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat, STATX_BASIC_STATS,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&fp->f_path, &stat);
#else
	rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
	if (rc)
		return (-rc);

	/* Copy the kstat results into the Solaris-style vattr. */
	vap->va_type = vn_mode_to_vtype(stat.mode);
	vap->va_mode = stat.mode;
	vap->va_uid = KUID_TO_SUID(stat.uid);
	vap->va_gid = KGID_TO_SGID(stat.gid);
	vap->va_fsid = 0;
	vap->va_nodeid = stat.ino;
	vap->va_nlink = stat.nlink;
	vap->va_size = stat.size;
	vap->va_blksize = stat.blksize;
	vap->va_atime = stat.atime;
	vap->va_mtime = stat.mtime;
	vap->va_ctime = stat.ctime;
	vap->va_rdev = stat.rdev;
	vap->va_nblocks = stat.blocks;

	return (0);
}
EXPORT_SYMBOL(vn_getattr);
391
/*
 * Flush the file backing 'vp' to stable storage.  FDSYNC in 'flags'
 * requests a data-only sync; x3 and x4 are unused compatibility
 * arguments.  Returns 0 on success or a positive errno.
 */
int
vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
	int datasync = 0;
	int error;
	int fstrans;

	ASSERT(vp);
	ASSERT(vp->v_file);

	if (flags & FDSYNC)
		datasync = 1;

	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over vfs_sync() and then reset.
	 */
	fstrans = __spl_pf_fstrans_check();
	if (fstrans)
		current->flags &= ~(__SPL_PF_FSTRANS);

	/* Negate the kernel's negative-errno result to Solaris style. */
	error = -spl_filp_fsync(vp->v_file, datasync);
	if (fstrans)
		current->flags |= __SPL_PF_FSTRANS;

	return (error);
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
420
/*
 * Free (punch a hole in) the byte range described by 'bfp' within the
 * file backing 'vp'.  Only the F_FREESP command with l_whence == 0 is
 * supported.  Prefers the filesystem's fallocate() hole-punch; falls
 * back to the legacy truncate_range inode operation when present.
 * Returns 0 on success or a positive errno.
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
    offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE
	int fstrans;
#endif

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * May enter XFS which generates a warning when PF_FSTRANS is set.
	 * To avoid this the flag is cleared over vfs_sync() and then reset.
	 */
	fstrans = __spl_pf_fstrans_check();
	if (fstrans)
		current->flags &= ~(__SPL_PF_FSTRANS);

	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to preallocate the space.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);

	if (fstrans)
		current->flags |= __SPL_PF_FSTRANS;

	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	/* Legacy fallback for kernels with the truncate_range inode op. */
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
		    vp->v_file->f_dentry->d_inode, bfp->l_start, end);

		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);
487
488 /* Function must be called while holding the vn_file_lock */
489 static file_t *
490 file_find(int fd, struct task_struct *task)
491 {
492 file_t *fp;
493
494 list_for_each_entry(fp, &vn_file_list, f_list) {
495 if (fd == fp->f_fd && fp->f_task == task) {
496 ASSERT(atomic_read(&fp->f_ref) != 0);
497 return (fp);
498 }
499 }
500
501 return (NULL);
502 } /* file_find() */
503
/*
 * Solaris getf() emulation: take a tracked reference on file
 * descriptor 'fd' for the current task.  The first call for a given
 * (fd, task) builds a file_t/vnode pair around the struct file and
 * adds it to vn_file_list; subsequent calls bump f_ref.  Returns the
 * file_t, or NULL on failure (bad fd, stale fd, or allocation error).
 * Paired with vn_releasef()/vn_areleasef().
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		lfp = fget(fd);
		fput(fp->f_file);
		/*
		 * areleasef() can cause us to see a stale reference when
		 * userspace has reused a file descriptor before areleasef()
		 * has run. fput() the stale reference and replace it. We
		 * retain the original reference count such that the concurrent
		 * areleasef() will decrement its reference and terminate.
		 */
		if (lfp != fp->f_file) {
			fp->f_file = lfp;
			fp->f_vnode->v_file = lfp;
		}
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	atomic_inc(&fp->f_ref);

	/* Take a reference on the underlying struct file. */
	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

	/* Stat the file to determine its type for v_type below. */
#if defined(HAVE_4ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat, STATX_TYPE,
	    AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
599
/*
 * Final teardown of a file_t: drop the struct file reference, free the
 * associated vnode, and return the file_t to its cache.  The caller
 * must already have unlinked 'fp' from vn_file_list.
 */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
611
/*
 * Solaris releasef() emulation: release the reference on 'fd' taken
 * by vn_getf() for the current process.
 */
void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
}
EXPORT_SYMBOL(releasef);
618
/*
 * Solaris areleasef() emulation: drop one reference on the tracked
 * file_t for descriptor 'fd' belonging to the task encoded in 'fip'
 * (see P_FINFO).  When the last reference is dropped the entry is
 * unlinked and destroyed.  Unknown descriptors are ignored.
 */
void
vn_areleasef(int fd, uf_info_t *fip)
{
	file_t *fp;
	/* P_FINFO() packs the task pointer into the uf_info_t argument. */
	struct task_struct *task = (struct task_struct *)fip;

	if (fd < 0)
		return;

	spin_lock(&vn_file_lock);
	fp = file_find(fd, task);
	if (fp) {
		/* Both operations are serialized by vn_file_lock. */
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			spin_unlock(&vn_file_lock);
			return;
		}

		/* Last reference: unlink and tear down the entry. */
		list_del(&fp->f_list);
		releasef_locked(fp);
	}
	spin_unlock(&vn_file_lock);
} /* releasef() */
EXPORT_SYMBOL(areleasef);
643
644
/*
 * Replace the working directory in 'fs' with 'path', taking a
 * reference on the new path and dropping the reference held on the
 * old one.  The fs_struct lock type (spinlock vs rwlock) varies by
 * kernel version, hence the two locking variants.
 */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Drop the reference previously held on the old pwd, if any. */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
671
/*
 * Change the current task's working directory to 'filename' (a kernel
 * space path).  The directory must exist and grant exec/access
 * permission.  Returns 0 on success or a positive errno.
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	/* Always restore the original address limit. */
	set_fs(saved_fs);

	/* Kernel calls above return negative errno; convert to positive. */
	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
705
/*
 * Cache constructor for vnode objects: initialize the embedded v_lock
 * mutex once per cached object.  Always succeeds.
 */
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp = buf;

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
} /* vn_cache_constructor() */
715
/*
 * Cache destructor for vnode objects: tear down the v_lock mutex
 * initialized by vn_cache_constructor().
 */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp = buf;

	mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */
723
/*
 * Cache constructor for file_t objects: zero the reference count and
 * initialize the f_lock mutex and tracking-list linkage.  Always
 * succeeds.
 */
static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	file_t *fp = buf;

	atomic_set(&fp->f_ref, 0);
	mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
	INIT_LIST_HEAD(&fp->f_list);

	return (0);
} /* vn_file_cache_constructor() */
735
/*
 * Cache destructor for file_t objects: tear down the f_lock mutex
 * initialized by vn_file_cache_constructor().
 */
static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
	file_t *fp = buf;

	mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
743
/*
 * Module init: create the vnode and file_t kmem caches used by this
 * layer.  Always returns 0.
 *
 * NOTE(review): the kmem_cache_create() results are not checked for
 * NULL here; presumably the SPL cache allocator cannot fail in this
 * configuration -- confirm before relying on it.
 */
int
spl_vn_init(void)
{
	vn_cache = kmem_cache_create("spl_vn_cache",
	    sizeof (struct vnode), 64, vn_cache_constructor,
	    vn_cache_destructor, NULL, NULL, NULL, 0);

	vn_file_cache = kmem_cache_create("spl_vn_file_cache",
	    sizeof (file_t), 64, vn_file_cache_constructor,
	    vn_file_cache_destructor, NULL, NULL, NULL, 0);

	return (0);
} /* spl_vn_init() */
757
/*
 * Module teardown: release any file_t entries still on the tracking
 * list (warning about the leak), then destroy both kmem caches.
 */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	/* Reclaim any references consumers failed to release. */
	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);
} /* spl_vn_fini() */