/*
 * linux/fs/stat.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure. This is the default if no getattr inode
 * operation is supplied.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if (IS_NOATIME(inode))
		stat->result_mask &= ~STATX_ATIME;
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user. Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	request_mask &= STATX_ALL;
	query_flags &= KSTAT_QUERY_FLAGS;
	if (inode->i_op->getattr)
		return inode->i_op->getattr(path, stat, request_mask,
					    query_flags);

	generic_fillattr(inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Ask the filesystem for a file's attributes. The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved. Any attribute not requested may be returned anyway,
 * but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

/**
 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct fd f;
	int error = -EBADF;

	if (query_flags & ~KSTAT_QUERY_FLAGS)
		return -EINVAL;

	f = fdget_raw(fd);
	if (f.file) {
		error = vfs_getattr(&f.file->f_path, stat,
				    request_mask, query_flags);
		fdput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_statx_fd);

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
		return -EINVAL;

	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_statx);


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility? Maybe this should be moved
 * into arch/i386 instead?
 */
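/*
 * Copy a kstat into the legacy __old_kernel_stat layout, returning
 * -EOVERFLOW if the inode number, link count or (on 32-bit) file size
 * does not fit in the old fields.
 */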
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif
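
/*
 * On 32-bit architectures struct stat still carries the old 16-bit dev_t
 * encoding, so device numbers must be range-checked and encoded the old
 * way; 64-bit architectures always fit and use the new encoding.
 */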
#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
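
/*
 * Copy a kstat into the userspace 'struct stat' used by the new*stat
 * syscalls, returning -EOVERFLOW if a device number, inode number, link
 * count or (on 32-bit) file size does not fit in the target fields.
 */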
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
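
/*
 * readlinkat(2): look up the name without following a trailing symlink
 * and copy the link target into the user-supplied buffer.
 */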
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif
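
/*
 * Copy a kstat into the userspace LFS-64 'struct stat64'. Only the inode
 * number can overflow here; everything else fits in the 64-bit layout.
 */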
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
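
/*
 * Copy a kstat into the userspace 'struct statx' used by statx(2). Every
 * kstat field fits, so this never returns -EOVERFLOW; the only error is
 * -EFAULT from the copy itself.
 */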
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
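
/*
 * Userspace sketch of the fstat() emulation described above (assumes a
 * libc that exposes the statx(2) wrapper declared in <sys/stat.h>):
 *
 *	struct statx stx;
 *
 *	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) == 0)
 *		printf("size=%llu\n", (unsigned long long)stx.stx_size);
 */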

#ifdef CONFIG_COMPAT
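/*
 * Copy a kstat into the 32-bit 'struct compat_stat' for compat tasks,
 * returning -EOVERFLOW when a device number, inode number, link count or
 * file size does not fit in the 32-bit fields.
 */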
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* The caller is responsible for sufficient locking (i.e. inode->i_lock). */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
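
/*
 * Subtract @bytes from the inode's block/byte accounting. As with
 * __inode_add_bytes(), the caller must already hold inode->i_lock.
 */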
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* The caller is responsible for sufficient locking
	 * (i.e. inode->i_lock). */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);