]>
Commit | Line | Data |
---|---|---|
7bdf406d TG |
1 | /*****************************************************************************\ |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. | |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
6 | * UCRL-CODE-235197 | |
7 | * | |
8 | * This file is part of the SPL, Solaris Porting Layer. | |
9 | * For details, see <http://zfsonlinux.org/>. | |
10 | * | |
11 | * The SPL is free software; you can redistribute it and/or modify it | |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
15 | * | |
16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT | |
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. | |
23 | ***************************************************************************** | |
24 | * Solaris Porting Layer (SPL) Vnode Implementation. | |
25 | \*****************************************************************************/ | |
26 | ||
27 | #include <sys/cred.h> | |
28 | #include <sys/vnode.h> | |
29 | #include <sys/kmem_cache.h> | |
30 | #include <linux/falloc.h> | |
31 | #include <linux/file_compat.h> | |
32 | ||
/* Solaris-style root directory vnode.  The SPL never dereferences it;
 * vn_openat() only compares against it, so a non-NULL sentinel value
 * is sufficient. */
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

/* kmem caches backing vn_alloc()/vn_free() and the getf() file_t objects */
static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

/* vn_file_lock protects vn_file_list, the list of file_t objects
 * handed out by vn_getf() and reclaimed by vn_areleasef(). */
static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);
41 | ||
42 | vtype_t | |
43 | vn_mode_to_vtype(mode_t mode) | |
44 | { | |
45 | if (S_ISREG(mode)) | |
46 | return VREG; | |
47 | ||
48 | if (S_ISDIR(mode)) | |
49 | return VDIR; | |
50 | ||
51 | if (S_ISCHR(mode)) | |
52 | return VCHR; | |
53 | ||
54 | if (S_ISBLK(mode)) | |
55 | return VBLK; | |
56 | ||
57 | if (S_ISFIFO(mode)) | |
58 | return VFIFO; | |
59 | ||
60 | if (S_ISLNK(mode)) | |
61 | return VLNK; | |
62 | ||
63 | if (S_ISSOCK(mode)) | |
64 | return VSOCK; | |
65 | ||
66 | if (S_ISCHR(mode)) | |
67 | return VCHR; | |
68 | ||
69 | return VNON; | |
70 | } /* vn_mode_to_vtype() */ | |
71 | EXPORT_SYMBOL(vn_mode_to_vtype); | |
72 | ||
73 | mode_t | |
74 | vn_vtype_to_mode(vtype_t vtype) | |
75 | { | |
76 | if (vtype == VREG) | |
77 | return S_IFREG; | |
78 | ||
79 | if (vtype == VDIR) | |
80 | return S_IFDIR; | |
81 | ||
82 | if (vtype == VCHR) | |
83 | return S_IFCHR; | |
84 | ||
85 | if (vtype == VBLK) | |
86 | return S_IFBLK; | |
87 | ||
88 | if (vtype == VFIFO) | |
89 | return S_IFIFO; | |
90 | ||
91 | if (vtype == VLNK) | |
92 | return S_IFLNK; | |
93 | ||
94 | if (vtype == VSOCK) | |
95 | return S_IFSOCK; | |
96 | ||
97 | return VNON; | |
98 | } /* vn_vtype_to_mode() */ | |
99 | EXPORT_SYMBOL(vn_vtype_to_mode); | |
100 | ||
101 | vnode_t * | |
102 | vn_alloc(int flag) | |
103 | { | |
104 | vnode_t *vp; | |
105 | ||
106 | vp = kmem_cache_alloc(vn_cache, flag); | |
107 | if (vp != NULL) { | |
108 | vp->v_file = NULL; | |
109 | vp->v_type = 0; | |
110 | } | |
111 | ||
112 | return (vp); | |
113 | } /* vn_alloc() */ | |
114 | EXPORT_SYMBOL(vn_alloc); | |
115 | ||
/*
 * vn_free() - return a vnode obtained from vn_alloc() to the vnode
 * kmem cache.
 */
void
vn_free(vnode_t *vp)
{
	kmem_cache_free(vn_cache, vp);
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
122 | ||
123 | int | |
124 | vn_open(const char *path, uio_seg_t seg, int flags, int mode, | |
125 | vnode_t **vpp, int x1, void *x2) | |
126 | { | |
127 | struct file *fp; | |
128 | struct kstat stat; | |
129 | int rc, saved_umask = 0; | |
130 | gfp_t saved_gfp; | |
131 | vnode_t *vp; | |
132 | ||
133 | ASSERT(flags & (FWRITE | FREAD)); | |
134 | ASSERT(seg == UIO_SYSSPACE); | |
135 | ASSERT(vpp); | |
136 | *vpp = NULL; | |
137 | ||
138 | if (!(flags & FCREAT) && (flags & FWRITE)) | |
139 | flags |= FEXCL; | |
140 | ||
141 | /* Note for filp_open() the two low bits must be remapped to mean: | |
142 | * 01 - read-only -> 00 read-only | |
143 | * 10 - write-only -> 01 write-only | |
144 | * 11 - read-write -> 10 read-write | |
145 | */ | |
146 | flags--; | |
147 | ||
148 | if (flags & FCREAT) | |
149 | saved_umask = xchg(¤t->fs->umask, 0); | |
150 | ||
151 | fp = filp_open(path, flags, mode); | |
152 | ||
153 | if (flags & FCREAT) | |
154 | (void)xchg(¤t->fs->umask, saved_umask); | |
155 | ||
156 | if (IS_ERR(fp)) | |
157 | return (-PTR_ERR(fp)); | |
158 | ||
159 | #ifdef HAVE_2ARGS_VFS_GETATTR | |
160 | rc = vfs_getattr(&fp->f_path, &stat); | |
161 | #else | |
162 | rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat); | |
163 | #endif | |
164 | if (rc) { | |
165 | filp_close(fp, 0); | |
166 | return (-rc); | |
167 | } | |
168 | ||
169 | vp = vn_alloc(KM_SLEEP); | |
170 | if (!vp) { | |
171 | filp_close(fp, 0); | |
172 | return (ENOMEM); | |
173 | } | |
174 | ||
175 | saved_gfp = mapping_gfp_mask(fp->f_mapping); | |
176 | mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS)); | |
177 | ||
178 | mutex_enter(&vp->v_lock); | |
179 | vp->v_type = vn_mode_to_vtype(stat.mode); | |
180 | vp->v_file = fp; | |
181 | vp->v_gfp_mask = saved_gfp; | |
182 | *vpp = vp; | |
183 | mutex_exit(&vp->v_lock); | |
184 | ||
185 | return (0); | |
186 | } /* vn_open() */ | |
187 | EXPORT_SYMBOL(vn_open); | |
188 | ||
189 | int | |
190 | vn_openat(const char *path, uio_seg_t seg, int flags, int mode, | |
191 | vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd) | |
192 | { | |
193 | char *realpath; | |
194 | int len, rc; | |
195 | ||
196 | ASSERT(vp == rootdir); | |
197 | ||
198 | len = strlen(path) + 2; | |
199 | realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP)); | |
200 | if (!realpath) | |
201 | return (ENOMEM); | |
202 | ||
203 | (void)snprintf(realpath, len, "/%s", path); | |
204 | rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2); | |
205 | kfree(realpath); | |
206 | ||
207 | return (rc); | |
208 | } /* vn_openat() */ | |
209 | EXPORT_SYMBOL(vn_openat); | |
210 | ||
211 | int | |
212 | vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off, | |
213 | uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp) | |
214 | { | |
215 | loff_t offset; | |
216 | mm_segment_t saved_fs; | |
217 | struct file *fp; | |
218 | int rc; | |
219 | ||
220 | ASSERT(uio == UIO_WRITE || uio == UIO_READ); | |
221 | ASSERT(vp); | |
222 | ASSERT(vp->v_file); | |
223 | ASSERT(seg == UIO_SYSSPACE); | |
224 | ASSERT((ioflag & ~FAPPEND) == 0); | |
0bd31011 | 225 | ASSERT(x2 == RLIM64_INFINITY); |
7bdf406d TG |
226 | |
227 | fp = vp->v_file; | |
228 | ||
229 | offset = off; | |
230 | if (ioflag & FAPPEND) | |
231 | offset = fp->f_pos; | |
232 | ||
233 | /* Writable user data segment must be briefly increased for this | |
234 | * process so we can use the user space read call paths to write | |
235 | * in to memory allocated by the kernel. */ | |
236 | saved_fs = get_fs(); | |
237 | set_fs(get_ds()); | |
238 | ||
239 | if (uio & UIO_WRITE) | |
240 | rc = vfs_write(fp, addr, len, &offset); | |
241 | else | |
242 | rc = vfs_read(fp, addr, len, &offset); | |
243 | ||
244 | set_fs(saved_fs); | |
245 | fp->f_pos = offset; | |
246 | ||
247 | if (rc < 0) | |
248 | return (-rc); | |
249 | ||
250 | if (residp) { | |
251 | *residp = len - rc; | |
252 | } else { | |
253 | if (rc != len) | |
254 | return (EIO); | |
255 | } | |
256 | ||
257 | return (0); | |
258 | } /* vn_rdwr() */ | |
259 | EXPORT_SYMBOL(vn_rdwr); | |
260 | ||
261 | int | |
262 | vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4) | |
263 | { | |
264 | int rc; | |
265 | ||
266 | ASSERT(vp); | |
267 | ASSERT(vp->v_file); | |
268 | ||
269 | mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask); | |
270 | rc = filp_close(vp->v_file, 0); | |
271 | vn_free(vp); | |
272 | ||
273 | return (-rc); | |
274 | } /* vn_close() */ | |
275 | EXPORT_SYMBOL(vn_close); | |
276 | ||
277 | /* vn_seek() does not actually seek it only performs bounds checking on the | |
278 | * proposed seek. We perform minimal checking and allow vn_rdwr() to catch | |
279 | * anything more serious. */ | |
280 | int | |
281 | vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct) | |
282 | { | |
283 | return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); | |
284 | } | |
285 | EXPORT_SYMBOL(vn_seek); | |
286 | ||
/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path. If the basename is not "." or "/", it will be an
 * index into the string. While the string should be NULL terminated, the
 * section referring to the basename is not. spl_basename is dual-licensed
 * GPLv2+ and CC0. Anyone wishing to reuse it in another codebase may pick
 * either license.
 */
static void
spl_basename(const char *s, const char **str, int *len)
{
	size_t end, start;

	if (!str || !len)
		return;

	/* NULL or empty input has no basename; report "." */
	if (!s || !*s) {
		*str = ".";
		*len = 1;
		return;
	}

	/* Strip trailing slashes; 'end' is one past the last kept char. */
	end = strlen(s);
	while (end > 1 && s[end - 1] == '/')
		end--;

	/* A path consisting solely of slashes has basename "/". */
	if (end == 1 && s[0] == '/') {
		*str = "/";
		*len = 1;
		return;
	}

	/*
	 * Scan back to the slash (if any) preceding the basename.
	 *
	 * The previous implementation's post-decrement in its scan loop
	 * dropped the final character when the path had no trailing
	 * slash and never examined s[0], so "foo" yielded "fo" and
	 * "/foo" yielded "/fo"; both are handled correctly here.
	 */
	start = end;
	while (start > 0 && s[start - 1] != '/')
		start--;

	*str = &s[start];
	*len = (int)(end - start);
}
332 | ||
/*
 * spl_kern_path_locked() - look up the dentry for the basename of 'name'
 * with its parent directory's inode mutex held.
 *
 * On success the parent path is returned through 'path' (with a reference
 * held) along with the child dentry; the caller is responsible for
 * unlocking the parent inode, dput()'ing the dentry, and path_put()'ing
 * the parent.  On failure an ERR_PTR() is returned and no locks or
 * references remain held.
 */
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
	struct path parent;
	struct dentry *dentry;
	const char *basename;
	int len;
	int rc;

	ASSERT(name);
	ASSERT(path);

	spl_basename(name, &basename, &len);

	/* We do not accept "." or ".." */
	if (len <= 2 && basename[0] == '.')
		if (len == 1 || basename[1] == '.')
			return (ERR_PTR(-EACCES));

	rc = kern_path(name, LOOKUP_PARENT, &parent);
	if (rc)
		return (ERR_PTR(rc));

	/* use I_MUTEX_PARENT because vfs_unlink needs it */
	spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);

	dentry = lookup_one_len(basename, parent.dentry, len);
	if (IS_ERR(dentry)) {
		/* Lookup failed: release the parent lock and reference
		 * so the error path leaves nothing held. */
		spl_inode_unlock(parent.dentry->d_inode);
		path_put(&parent);
	} else {
		*path = parent;
	}

	return (dentry);
}
369 | ||
/* Based on do_unlinkat() from linux/fs/namei.c */
/*
 * vn_remove() - unlink the file at 'path'.  Only UIO_SYSSPACE paths and
 * the RMFILE flag are supported.  Returns 0 on success or a positive
 * errno.
 */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
	struct dentry *dentry;
	struct path parent;
	struct inode *inode = NULL;
	int rc = 0;

	ASSERT(seg == UIO_SYSSPACE);
	ASSERT(flags == RMFILE);

	/* On success 'parent' is referenced and its inode is locked. */
	dentry = spl_kern_path_locked(path, &parent);
	rc = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* NOTE(review): do_unlinkat() applies this trailing-slash
		 * test to the child's last path component; here the
		 * parent's d_name is inspected instead — confirm this is
		 * intended. */
		if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
			rc = 0;
			goto slashes;
		}

		inode = dentry->d_inode;
		if (inode) {
			/* Pin the inode so it survives vfs_unlink(). */
			atomic_inc(&inode->i_count);
		} else {
			rc = 0;
			goto slashes;
		}

#ifdef HAVE_2ARGS_VFS_UNLINK
		rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
		rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
		dput(dentry);
	} else {
		return (-rc);
	}

	spl_inode_unlock(parent.dentry->d_inode);
	if (inode)
		iput(inode);	/* truncate the inode here */

	path_put(&parent);
	return (-rc);

slashes:
	rc = !dentry->d_inode ? -ENOENT :
	    S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
422 | ||
/* Based on do_rename() from linux/fs/namei.c */
/*
 * vn_rename() - rename 'oldname' to 'newname'.  Both paths must reside
 * on the same mount.  Returns 0 on success or a positive errno.
 */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct path old_parent, new_parent;
	int rc = 0;

	/* Each lookup returns with the parent inode locked; that lock is
	 * dropped immediately because lock_rename() below re-acquires
	 * both parents in the kernel's canonical order. */
	old_dentry = spl_kern_path_locked(oldname, &old_parent);
	if (IS_ERR(old_dentry)) {
		rc = PTR_ERR(old_dentry);
		goto exit;
	}

	spl_inode_unlock(old_parent.dentry->d_inode);

	new_dentry = spl_kern_path_locked(newname, &new_parent);
	if (IS_ERR(new_dentry)) {
		rc = PTR_ERR(new_dentry);
		goto exit2;
	}

	spl_inode_unlock(new_parent.dentry->d_inode);

	/* Cross-mount renames are not possible. */
	rc = -EXDEV;
	if (old_parent.mnt != new_parent.mnt)
		goto exit3;

	old_dir = old_parent.dentry;
	new_dir = new_parent.dentry;
	trap = lock_rename(new_dir, old_dir);

	/* source should not be ancestor of target */
	rc = -EINVAL;
	if (old_dentry == trap)
		goto exit4;

	/* target should not be an ancestor of source */
	rc = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit4;

	/* source must exist */
	rc = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;

	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		rc = -ENOTDIR;
		if (old_dentry->d_name.name[old_dentry->d_name.len])
			goto exit4;
		if (new_dentry->d_name.name[new_dentry->d_name.len])
			goto exit4;
	}

#if defined(HAVE_4ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL);
#else
	rc = vfs_rename(old_dir->d_inode, old_dentry,
	    new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
	unlock_rename(new_dir, old_dir);
exit3:
	dput(new_dentry);
	path_put(&new_parent);
exit2:
	dput(old_dentry);
	path_put(&old_parent);
exit:
	return (-rc);
}
EXPORT_SYMBOL(vn_rename);
503 | ||
504 | int | |
505 | vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4) | |
506 | { | |
507 | struct file *fp; | |
508 | struct kstat stat; | |
509 | int rc; | |
510 | ||
511 | ASSERT(vp); | |
512 | ASSERT(vp->v_file); | |
513 | ASSERT(vap); | |
514 | ||
515 | fp = vp->v_file; | |
516 | ||
517 | #ifdef HAVE_2ARGS_VFS_GETATTR | |
518 | rc = vfs_getattr(&fp->f_path, &stat); | |
519 | #else | |
520 | rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat); | |
521 | #endif | |
522 | if (rc) | |
523 | return (-rc); | |
524 | ||
525 | vap->va_type = vn_mode_to_vtype(stat.mode); | |
526 | vap->va_mode = stat.mode; | |
527 | vap->va_uid = KUID_TO_SUID(stat.uid); | |
528 | vap->va_gid = KGID_TO_SGID(stat.gid); | |
529 | vap->va_fsid = 0; | |
530 | vap->va_nodeid = stat.ino; | |
531 | vap->va_nlink = stat.nlink; | |
532 | vap->va_size = stat.size; | |
533 | vap->va_blksize = stat.blksize; | |
534 | vap->va_atime = stat.atime; | |
535 | vap->va_mtime = stat.mtime; | |
536 | vap->va_ctime = stat.ctime; | |
537 | vap->va_rdev = stat.rdev; | |
538 | vap->va_nblocks = stat.blocks; | |
539 | ||
540 | return (0); | |
541 | } | |
542 | EXPORT_SYMBOL(vn_getattr); | |
543 | ||
544 | int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4) | |
545 | { | |
546 | int datasync = 0; | |
547 | int error; | |
548 | int fstrans; | |
549 | ||
550 | ASSERT(vp); | |
551 | ASSERT(vp->v_file); | |
552 | ||
553 | if (flags & FDSYNC) | |
554 | datasync = 1; | |
555 | ||
556 | /* | |
557 | * May enter XFS which generates a warning when PF_FSTRANS is set. | |
558 | * To avoid this the flag is cleared over vfs_sync() and then reset. | |
559 | */ | |
560 | fstrans = spl_fstrans_check(); | |
561 | if (fstrans) | |
562 | current->flags &= ~(PF_FSTRANS); | |
563 | ||
564 | error = -spl_filp_fsync(vp->v_file, datasync); | |
565 | if (fstrans) | |
566 | current->flags |= PF_FSTRANS; | |
567 | ||
568 | return (error); | |
569 | } /* vn_fsync() */ | |
570 | EXPORT_SYMBOL(vn_fsync); | |
571 | ||
/*
 * vn_space() - free (punch a hole in) a byte range of a file, in the
 * style of Solaris VOP_SPACE().  Only cmd F_FREESP with l_whence == 0
 * (absolute offsets) is supported.  Returns 0 on success, EOPNOTSUPP
 * when no hole-punching mechanism is available, or another positive
 * errno.
 */
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
	offset_t offset, void *x6, void *x7)
{
	int error = EOPNOTSUPP;

	if (cmd != F_FREESP || bfp->l_whence != 0)
		return (EOPNOTSUPP);

	ASSERT(vp);
	ASSERT(vp->v_file);
	ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
	/*
	 * When supported by the underlying file system preferentially
	 * use the fallocate() callback to preallocate the space.
	 */
	error = -spl_filp_fallocate(vp->v_file,
	    FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
	    bfp->l_start, bfp->l_len);
	if (error == 0)
		return (0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
	/* Fallback for older kernels: use the inode truncate_range
	 * operation when the filesystem provides one. */
	if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
	    vp->v_file->f_dentry->d_inode->i_op &&
	    vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
		off_t end = bfp->l_start + bfp->l_len;
		/*
		 * Judging from the code in shmem_truncate_range(),
		 * it seems the kernel expects the end offset to be
		 * inclusive and aligned to the end of a page.
		 */
		if (end % PAGE_SIZE != 0) {
			end &= ~(off_t)(PAGE_SIZE - 1);
			if (end <= bfp->l_start)
				return (0);
		}
		--end;

		vp->v_file->f_dentry->d_inode->i_op->truncate_range(
			vp->v_file->f_dentry->d_inode,
			bfp->l_start, end
		);
		return (0);
	}
#endif

	return (error);
}
EXPORT_SYMBOL(vn_space);
624 | ||
625 | /* Function must be called while holding the vn_file_lock */ | |
626 | static file_t * | |
627 | file_find(int fd, struct task_struct *task) | |
628 | { | |
629 | file_t *fp; | |
630 | ||
631 | ASSERT(spin_is_locked(&vn_file_lock)); | |
632 | ||
633 | list_for_each_entry(fp, &vn_file_list, f_list) { | |
634 | if (fd == fp->f_fd && fp->f_task == task) { | |
635 | ASSERT(atomic_read(&fp->f_ref) != 0); | |
636 | return fp; | |
637 | } | |
638 | } | |
639 | ||
640 | return NULL; | |
641 | } /* file_find() */ | |
642 | ||
/*
 * vn_getf() - Solaris-style getf(); translate a process file descriptor
 * into an SPL-tracked file_t.
 *
 * If the descriptor was already wrapped by this task an extra reference
 * is taken on the existing file_t.  Otherwise a new file_t is built:
 * the struct file is pinned with fget(), a vnode is allocated for it,
 * and the object is added to the global vn_file_list.  Returns NULL on
 * any failure (bad fd, allocation failure, or vfs_getattr() error).
 */
file_t *
vn_getf(int fd)
{
	struct kstat stat;
	struct file *lfp;
	file_t *fp;
	vnode_t *vp;
	int rc = 0;

	if (fd < 0)
		return (NULL);

	/* Already open just take an extra reference */
	spin_lock(&vn_file_lock);

	fp = file_find(fd, current);
	if (fp) {
		atomic_inc(&fp->f_ref);
		spin_unlock(&vn_file_lock);
		return (fp);
	}

	spin_unlock(&vn_file_lock);

	/* File was not yet opened create the object and setup */
	fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
	if (fp == NULL)
		goto out;

	mutex_enter(&fp->f_lock);

	fp->f_fd = fd;
	fp->f_task = current;
	fp->f_offset = 0;
	/* Safe without vn_file_lock: fp is not yet on vn_file_list. */
	atomic_inc(&fp->f_ref);

	lfp = fget(fd);
	if (lfp == NULL)
		goto out_mutex;

	vp = vn_alloc(KM_SLEEP);
	if (vp == NULL)
		goto out_fget;

#ifdef HAVE_2ARGS_VFS_GETATTR
	rc = vfs_getattr(&lfp->f_path, &stat);
#else
	rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
	if (rc)
		goto out_vnode;

	mutex_enter(&vp->v_lock);
	vp->v_type = vn_mode_to_vtype(stat.mode);
	vp->v_file = lfp;
	mutex_exit(&vp->v_lock);

	fp->f_vnode = vp;
	fp->f_file = lfp;

	/* Put it on the tracking list */
	spin_lock(&vn_file_lock);
	list_add(&fp->f_list, &vn_file_list);
	spin_unlock(&vn_file_lock);

	mutex_exit(&fp->f_lock);
	return (fp);

out_vnode:
	vn_free(vp);
out_fget:
	fput(lfp);
out_mutex:
	mutex_exit(&fp->f_lock);
	kmem_cache_free(vn_file_cache, fp);
out:
	return (NULL);
} /* getf() */
/* Exported under its Solaris name; presumably getf is aliased to
 * vn_getf in sys/vnode.h — confirm against the header. */
EXPORT_SYMBOL(getf);
722 | ||
/*
 * releasef_locked() - destroy a file_t that has already been removed
 * from vn_file_list and has no remaining references: drop the pinned
 * struct file, free its vnode, and return the file_t to its cache.
 */
static void releasef_locked(file_t *fp)
{
	ASSERT(fp->f_file);
	ASSERT(fp->f_vnode);

	/* Unlinked from list, no refs, safe to free outside mutex */
	fput(fp->f_file);
	vn_free(fp->f_vnode);

	kmem_cache_free(vn_file_cache, fp);
}
734 | ||
/*
 * vn_releasef() - drop a getf() reference taken by the current task.
 */
void
vn_releasef(int fd)
{
	areleasef(fd, P_FINFO(current));
}
EXPORT_SYMBOL(releasef);
741 | ||
/*
 * vn_areleasef() - drop a getf() reference on 'fd' for the task encoded
 * in 'fip'.  When the last reference is dropped the file_t is unlinked
 * from vn_file_list and destroyed.
 */
void
vn_areleasef(int fd, uf_info_t *fip)
{
	file_t *fp;
	/* Under the SPL the Solaris uf_info_t is simply the task_struct
	 * pointer (cf. P_FINFO in vn_releasef()). */
	struct task_struct *task = (struct task_struct *)fip;

	if (fd < 0)
		return;

	spin_lock(&vn_file_lock);
	fp = file_find(fd, task);
	if (fp) {
		/* The dec/read pair is not atomic as a whole, but every
		 * f_ref change for a listed file_t occurs while holding
		 * vn_file_lock, so no other thread can interleave here. */
		atomic_dec(&fp->f_ref);
		if (atomic_read(&fp->f_ref) > 0) {
			spin_unlock(&vn_file_lock);
			return;
		}

		list_del(&fp->f_list);
		releasef_locked(fp);
	}
	spin_unlock(&vn_file_lock);

	return;
} /* releasef() */
EXPORT_SYMBOL(areleasef);
768 | ||
769 | ||
/*
 * vn_set_fs_pwd() - replace the working directory in 'fs' with 'path',
 * taking a reference on the new path and dropping the reference on the
 * old one.  The fs_struct is protected by either a spinlock or an
 * rwlock depending on kernel version, hence the two locking variants.
 */
static void
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
{
	struct path old_pwd;

#ifdef HAVE_FS_STRUCT_SPINLOCK
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);
#else
	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */

	/* Release the reference held on the previous pwd, if any. */
	if (old_pwd.dentry)
		path_put(&old_pwd);
}
796 | ||
/*
 * vn_set_pwd() - change the current process's working directory to
 * 'filename', analogous to chdir(2).  Returns 0 on success or a
 * positive errno.
 */
int
vn_set_pwd(const char *filename)
{
	struct path path;
	mm_segment_t saved_fs;
	int rc;

	/*
	 * user_path_dir() and __user_walk() both expect 'filename' to be
	 * a user space address so we must briefly increase the data segment
	 * size to ensure strncpy_from_user() does not fail with -EFAULT.
	 */
	saved_fs = get_fs();
	set_fs(get_ds());

	rc = user_path_dir(filename, &path);
	if (rc)
		goto out;

	/* Require search permission on the target directory. */
	rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
	if (rc)
		goto dput_and_out;

	vn_set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
out:
	set_fs(saved_fs);

	return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
830 | ||
831 | static int | |
832 | vn_cache_constructor(void *buf, void *cdrarg, int kmflags) | |
833 | { | |
834 | struct vnode *vp = buf; | |
835 | ||
836 | mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL); | |
837 | ||
838 | return (0); | |
839 | } /* vn_cache_constructor() */ | |
840 | ||
841 | static void | |
842 | vn_cache_destructor(void *buf, void *cdrarg) | |
843 | { | |
844 | struct vnode *vp = buf; | |
845 | ||
846 | mutex_destroy(&vp->v_lock); | |
847 | } /* vn_cache_destructor() */ | |
848 | ||
849 | static int | |
850 | vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags) | |
851 | { | |
852 | file_t *fp = buf; | |
853 | ||
854 | atomic_set(&fp->f_ref, 0); | |
855 | mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL); | |
856 | INIT_LIST_HEAD(&fp->f_list); | |
857 | ||
858 | return (0); | |
859 | } /* file_cache_constructor() */ | |
860 | ||
861 | static void | |
862 | vn_file_cache_destructor(void *buf, void *cdrarg) | |
863 | { | |
864 | file_t *fp = buf; | |
865 | ||
866 | mutex_destroy(&fp->f_lock); | |
867 | } /* vn_file_cache_destructor() */ | |
868 | ||
869 | int | |
870 | spl_vn_init(void) | |
871 | { | |
872 | vn_cache = kmem_cache_create("spl_vn_cache", | |
873 | sizeof(struct vnode), 64, | |
874 | vn_cache_constructor, | |
875 | vn_cache_destructor, | |
d177712e | 876 | NULL, NULL, NULL, 0); |
7bdf406d TG |
877 | |
878 | vn_file_cache = kmem_cache_create("spl_vn_file_cache", | |
879 | sizeof(file_t), 64, | |
880 | vn_file_cache_constructor, | |
881 | vn_file_cache_destructor, | |
d177712e | 882 | NULL, NULL, NULL, 0); |
7bdf406d TG |
883 | return (0); |
884 | } /* vn_init() */ | |
885 | ||
/*
 * spl_vn_fini() - module teardown; force-release any file_t objects
 * still on vn_file_list (leaks by callers that missed a releasef())
 * and destroy both kmem caches.
 */
void
spl_vn_fini(void)
{
	file_t *fp, *next_fp;
	int leaked = 0;

	spin_lock(&vn_file_lock);

	/* _safe variant: releasef_locked() frees fp while we iterate. */
	list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
		list_del(&fp->f_list);
		releasef_locked(fp);
		leaked++;
	}

	spin_unlock(&vn_file_lock);

	if (leaked > 0)
		printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

	kmem_cache_destroy(vn_file_cache);
	kmem_cache_destroy(vn_cache);

	return;
} /* vn_fini() */