1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/stat.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8 #include <linux/export.h>
9 #include <linux/mm.h>
10 #include <linux/errno.h>
11 #include <linux/file.h>
12 #include <linux/highuid.h>
13 #include <linux/fs.h>
14 #include <linux/namei.h>
15 #include <linux/security.h>
16 #include <linux/cred.h>
17 #include <linux/syscalls.h>
18 #include <linux/pagemap.h>
19 #include <linux/compat.h>
20
21 #include <linux/uaccess.h>
22 #include <asm/unistd.h>
23
24 /**
25 * generic_fillattr - Fill in the basic attributes from the inode struct
26 * @inode: Inode to use as the source
27 * @stat: Where to fill in the attributes
28 *
29 * Fill in the basic attributes in the kstat structure from data that's to be
30 * found on the VFS inode structure. This is the default if no getattr inode
31 * operation is supplied.
32 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	/* atime isn't maintained on noatime inodes, so don't claim it */
	if (IS_NOATIME(inode))
		stat->result_mask &= ~STATX_ATIME;
	/* surface the automount flag to statx() callers */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;
}
EXPORT_SYMBOL(generic_fillattr);
55
56 /**
57 * vfs_getattr_nosec - getattr without security checks
58 * @path: file to get attributes from
59 * @stat: structure to return attributes in
60 * @request_mask: STATX_xxx flags indicating what the caller wants
61 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
62 *
63 * Get attributes without calling security_inode_getattr.
64 *
65 * Currently the only caller other than vfs_getattr is internal to the
66 * filehandle lookup code, which uses only the inode number and returns no
67 * attributes to any user. Any other code probably wants vfs_getattr.
68 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_backing_inode(path->dentry);

	/* Start from a clean slate; the basic stats are always provided. */
	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	/* Strip bits the VFS doesn't define before handing to the fs. */
	request_mask &= STATX_ALL;
	query_flags &= KSTAT_QUERY_FLAGS;
	if (inode->i_op->getattr)
		return inode->i_op->getattr(path, stat, request_mask,
					    query_flags);

	/* No filesystem ->getattr(); fill from the VFS inode fields. */
	generic_fillattr(inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
86
87 /*
88 * vfs_getattr - Get the enhanced basic attributes of a file
89 * @path: The file of interest
90 * @stat: Where to return the statistics
91 * @request_mask: STATX_xxx flags indicating what the caller wants
92 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
93 *
94 * Ask the filesystem for a file's attributes. The caller must indicate in
95 * request_mask and query_flags to indicate what they want.
96 *
97 * If the file is remote, the filesystem can be forced to update the attributes
98 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
99 * suppress the update by passing AT_STATX_DONT_SYNC.
100 *
101 * Bits must have been set in request_mask to indicate which attributes the
102 * caller wants retrieving. Any such attribute not requested may be returned
103 * anyway, but the value may be approximate, and, if remote, may not have been
104 * synchronised with the server.
105 *
106 * 0 will be returned on success, and a -ve error code if unsuccessful.
107 */
108 int vfs_getattr(const struct path *path, struct kstat *stat,
109 u32 request_mask, unsigned int query_flags)
110 {
111 int retval;
112
113 retval = security_inode_getattr(path);
114 if (retval)
115 return retval;
116 return vfs_getattr_nosec(path, stat, request_mask, query_flags);
117 }
118 EXPORT_SYMBOL(vfs_getattr);
119
120 /**
121 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
122 * @fd: The file descriptor referring to the file of interest
123 * @stat: The result structure to fill in.
124 * @request_mask: STATX_xxx flags indicating what the caller wants
125 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
126 *
127 * This function is a wrapper around vfs_getattr(). The main difference is
128 * that it uses a file descriptor to determine the file location.
129 *
130 * 0 will be returned on success, and a -ve error code if unsuccessful.
131 */
132 int vfs_statx_fd(unsigned int fd, struct kstat *stat,
133 u32 request_mask, unsigned int query_flags)
134 {
135 struct fd f;
136 int error = -EBADF;
137
138 if (query_flags & ~KSTAT_QUERY_FLAGS)
139 return -EINVAL;
140
141 f = fdget_raw(fd);
142 if (f.file) {
143 error = vfs_getattr(&f.file->f_path, stat,
144 request_mask, query_flags);
145 fdput(f);
146 }
147 return error;
148 }
149 EXPORT_SYMBOL(vfs_statx_fd);
150
151 /**
152 * vfs_statx - Get basic and extra attributes by filename
153 * @dfd: A file descriptor representing the base dir for a relative filename
154 * @filename: The name of the file of interest
155 * @flags: Flags to control the query
156 * @stat: The result structure to fill in.
157 * @request_mask: STATX_xxx flags indicating what the caller wants
158 *
159 * This function is a wrapper around vfs_getattr(). The main difference is
160 * that it uses a filename and base directory to determine the file location.
161 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
162 * at the given name from being referenced.
163 *
164 * 0 will be returned on success, and a -ve error code if unsuccessful.
165 */
int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;

	/* Reject any flags outside the documented set. */
	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
		return -EINVAL;

	/* Translate the AT_* flags into pathwalk LOOKUP_* flags. */
	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	path_put(&path);
	/* On ESTALE, retry the lookup once with revalidation forced. */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_statx);
199
200
201 #ifdef __ARCH_WANT_OLD_STAT
202
203 /*
204 * For backward compatibility? Maybe this should be moved
205 * into arch/i386 instead?
206 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	/* Warn (a few times only) about callers still on the ancient ABI. */
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	/* Zero first so struct padding never leaks kernel stack data. */
	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	/* The old ABI cannot represent non-LFS-sized files on 32-bit. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
243
244 SYSCALL_DEFINE2(stat, const char __user *, filename,
245 struct __old_kernel_stat __user *, statbuf)
246 {
247 struct kstat stat;
248 int error;
249
250 error = vfs_stat(filename, &stat);
251 if (error)
252 return error;
253
254 return cp_old_stat(&stat, statbuf);
255 }
256
257 SYSCALL_DEFINE2(lstat, const char __user *, filename,
258 struct __old_kernel_stat __user *, statbuf)
259 {
260 struct kstat stat;
261 int error;
262
263 error = vfs_lstat(filename, &stat);
264 if (error)
265 return error;
266
267 return cp_old_stat(&stat, statbuf);
268 }
269
270 SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
271 {
272 struct kstat stat;
273 int error = vfs_fstat(fd, &stat);
274
275 if (!error)
276 error = cp_old_stat(&stat, statbuf);
277
278 return error;
279 }
280
281 #endif /* __ARCH_WANT_OLD_STAT */
282
283 #if BITS_PER_LONG == 32
284 # define choose_32_64(a,b) a
285 #else
286 # define choose_32_64(a,b) b
287 #endif
288
289 #define valid_dev(x) choose_32_64(old_valid_dev(x),true)
290 #define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
291
292 #ifndef INIT_STRUCT_STAT_PADDING
293 # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
294 #endif
295
/* Translate a kstat into the native struct stat layout and copy it out. */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	/* Device numbers must fit the (possibly old 16-bit) dev_t encoding. */
	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	/* Non-LFS 32-bit userspace cannot represent sizes >= 2GiB. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	/* Clear padding so no kernel stack data leaks to userspace. */
	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
332
333 SYSCALL_DEFINE2(newstat, const char __user *, filename,
334 struct stat __user *, statbuf)
335 {
336 struct kstat stat;
337 int error = vfs_stat(filename, &stat);
338
339 if (error)
340 return error;
341 return cp_new_stat(&stat, statbuf);
342 }
343
344 SYSCALL_DEFINE2(newlstat, const char __user *, filename,
345 struct stat __user *, statbuf)
346 {
347 struct kstat stat;
348 int error;
349
350 error = vfs_lstat(filename, &stat);
351 if (error)
352 return error;
353
354 return cp_new_stat(&stat, statbuf);
355 }
356
357 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
358 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
359 struct stat __user *, statbuf, int, flag)
360 {
361 struct kstat stat;
362 int error;
363
364 error = vfs_fstatat(dfd, filename, &stat, flag);
365 if (error)
366 return error;
367 return cp_new_stat(&stat, statbuf);
368 }
369 #endif
370
371 SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
372 {
373 struct kstat stat;
374 int error = vfs_fstat(fd, &stat);
375
376 if (!error)
377 error = cp_new_stat(&stat, statbuf);
378
379 return error;
380 }
381
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		/* An empty path yields ENOENT; a non-symlink yields EINVAL. */
		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		/* On ESTALE, retry the lookup once with revalidation forced. */
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}
417
/* readlink(2) is simply readlinkat(2) relative to the current directory. */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
423
424
425 /* ---------- LFS-64 ----------- */
426 #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
427
428 #ifndef INIT_STRUCT_STAT64_PADDING
429 # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
430 #endif
431
/* Translate a kstat into the LFS-64 struct stat64 layout and copy it out. */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	/* Clear padding so no kernel stack data leaks to userspace. */
	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
466
467 SYSCALL_DEFINE2(stat64, const char __user *, filename,
468 struct stat64 __user *, statbuf)
469 {
470 struct kstat stat;
471 int error = vfs_stat(filename, &stat);
472
473 if (!error)
474 error = cp_new_stat64(&stat, statbuf);
475
476 return error;
477 }
478
479 SYSCALL_DEFINE2(lstat64, const char __user *, filename,
480 struct stat64 __user *, statbuf)
481 {
482 struct kstat stat;
483 int error = vfs_lstat(filename, &stat);
484
485 if (!error)
486 error = cp_new_stat64(&stat, statbuf);
487
488 return error;
489 }
490
491 SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
492 {
493 struct kstat stat;
494 int error = vfs_fstat(fd, &stat);
495
496 if (!error)
497 error = cp_new_stat64(&stat, statbuf);
498
499 return error;
500 }
501
502 SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
503 struct stat64 __user *, statbuf, int, flag)
504 {
505 struct kstat stat;
506 int error;
507
508 error = vfs_fstatat(dfd, filename, &stat, flag);
509 if (error)
510 return error;
511 return cp_new_stat64(&stat, statbuf);
512 }
513 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
514
/*
 * Marshal a kstat into the userspace struct statx layout.
 * noinline_for_stack keeps the large on-stack temporary out of the
 * caller's frame.
 */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	/* Zero everything, padding and spare fields included. */
	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	/* statx splits dev_t values into explicit major/minor pairs. */
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
548
549 /**
550 * sys_statx - System call to get enhanced stats
551 * @dfd: Base directory to pathwalk from *or* fd to stat.
552 * @filename: File to stat or "" with AT_EMPTY_PATH
553 * @flags: AT_* flags to control pathwalk.
554 * @mask: Parts of statx struct actually required.
555 * @buffer: Result buffer.
556 *
557 * Note that fstat() can be emulated by setting dfd to the fd of interest,
558 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
559 */
560 SYSCALL_DEFINE5(statx,
561 int, dfd, const char __user *, filename, unsigned, flags,
562 unsigned int, mask,
563 struct statx __user *, buffer)
564 {
565 struct kstat stat;
566 int error;
567
568 if (mask & STATX__RESERVED)
569 return -EINVAL;
570 if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
571 return -EINVAL;
572
573 error = vfs_statx(dfd, filename, flags, &stat, mask);
574 if (error)
575 return error;
576
577 return cp_statx(&stat, buffer);
578 }
579
580 #ifdef CONFIG_COMPAT
/* Translate a kstat into the 32-bit compat stat layout and copy it out. */
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	/* Compat dev_t uses the old narrow encoding; reject wide numbers. */
	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	/* Zero first so struct padding never leaks kernel stack data. */
	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	/* 32-bit compat userspace cannot represent non-LFS sizes. */
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
613
614 COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
615 struct compat_stat __user *, statbuf)
616 {
617 struct kstat stat;
618 int error;
619
620 error = vfs_stat(filename, &stat);
621 if (error)
622 return error;
623 return cp_compat_stat(&stat, statbuf);
624 }
625
626 COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
627 struct compat_stat __user *, statbuf)
628 {
629 struct kstat stat;
630 int error;
631
632 error = vfs_lstat(filename, &stat);
633 if (error)
634 return error;
635 return cp_compat_stat(&stat, statbuf);
636 }
637
638 #ifndef __ARCH_WANT_STAT64
639 COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
640 const char __user *, filename,
641 struct compat_stat __user *, statbuf, int, flag)
642 {
643 struct kstat stat;
644 int error;
645
646 error = vfs_fstatat(dfd, filename, &stat, flag);
647 if (error)
648 return error;
649 return cp_compat_stat(&stat, statbuf);
650 }
651 #endif
652
653 COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
654 struct compat_stat __user *, statbuf)
655 {
656 struct kstat stat;
657 int error = vfs_fstat(fd, &stat);
658
659 if (!error)
660 error = cp_compat_stat(&stat, statbuf);
661 return error;
662 }
663 #endif
664
/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	/* i_blocks counts 512-byte units; i_bytes holds the sub-block rest. */
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	/* Carry an overflowed remainder into a whole block. */
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);
677
/* Locked wrapper around __inode_add_bytes(). */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
686
/* Inverse of __inode_add_bytes(); caller must hold inode->i_lock. */
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	/* Borrow a whole block if the remainder would go negative. */
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);
699
/* Locked wrapper around __inode_sub_bytes(). */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);
708
/* Read the byte count maintained in i_blocks/i_bytes, under i_lock. */
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);
720
/* Split @bytes into whole 512-byte blocks plus a sub-block remainder. */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);