/*
 * linux/fs/stat.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if (IS_NOATIME(inode))
		stat->result_mask &= ~STATX_ATIME;
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;
}
EXPORT_SYMBOL(generic_fillattr);
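
/*
 * Illustrative sketch only (hypothetical "examplefs", not part of this
 * file): a filesystem ->getattr() implementation typically starts from
 * generic_fillattr() and then overrides individual fields:
 *
 *	static int examplefs_getattr(const struct path *path, struct kstat *stat,
 *				     u32 request_mask, unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(inode, stat);
 *		stat->blksize = 4096;
 *		return 0;
 *	}
 *
 * where the blksize override stands in for whatever the filesystem wants to
 * report differently from the raw inode fields.
 */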

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	request_mask &= STATX_ALL;
	query_flags &= KSTAT_QUERY_FLAGS;
	if (inode->i_op->getattr)
		return inode->i_op->getattr(path, stat, request_mask,
					    query_flags);

	generic_fillattr(inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
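
/*
 * Illustrative sketch only (the file and size variables are hypothetical):
 * a typical in-kernel caller that only needs the file size would do
 * something like
 *
 *	struct kstat stat;
 *	int err = vfs_getattr(&file->f_path, &stat, STATX_SIZE,
 *			      AT_STATX_SYNC_AS_STAT);
 *	if (!err)
 *		size = stat.size;
 *
 * AT_STATX_SYNC_AS_STAT (0) asks for whatever synchronisation an ordinary
 * stat() would perform.
 */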

/**
 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct fd f = fdget_raw(fd);
	int error = -EBADF;

	if (f.file) {
		error = vfs_getattr(&f.file->f_path, stat,
				    request_mask, query_flags);
		fdput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_statx_fd);

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * @request_mask indicates which attributes the caller wants, as for
 * vfs_getattr(), and @flags is also passed to vfs_getattr() as the query
 * flags (only the KSTAT_QUERY_FLAGS bits are used there).
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
		return -EINVAL;

	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_statx);
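
/*
 * Note: the vfs_stat(), vfs_lstat() and vfs_fstatat() helpers used by the
 * syscalls below are thin inline wrappers around vfs_statx() defined in
 * <linux/fs.h>, roughly of the form (sketch, not the authoritative
 * definition):
 *
 *	static inline int vfs_stat(const char __user *filename,
 *				   struct kstat *stat)
 *	{
 *		return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
 *				 stat, STATX_BASIC_STATS);
 *	}
 */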


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
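
/*
 * On 32-bit kernels these resolve to the legacy 16-bit dev_t helpers, so
 * valid_dev() must range-check the value and encode_dev() becomes
 * old_encode_dev().  On 64-bit kernels every dev_t is representable,
 * valid_dev() is simply true and encode_dev() becomes new_encode_dev().
 */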

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}
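
/*
 * As in vfs_statx() above, a lookup whose result turned out to be stale
 * (typically -ESTALE from a network filesystem) is retried exactly once
 * with LOOKUP_REVAL set, forcing revalidation of every dentry on the path.
 */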

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static inline int __put_timestamp(struct timespec *kts,
				  struct statx_timestamp __user *uts)
{
	return (__put_user(kts->tv_sec, &uts->tv_sec) ||
		__put_user(kts->tv_nsec, &uts->tv_nsec) ||
		__put_user(0, &uts->__reserved));
}

/*
 * Set the statx results.
 */
static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
{
	uid_t uid = from_kuid_munged(current_user_ns(), stat->uid);
	gid_t gid = from_kgid_munged(current_user_ns(), stat->gid);

	if (__put_user(stat->result_mask, &buffer->stx_mask) ||
	    __put_user(stat->mode, &buffer->stx_mode) ||
	    __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) ||
	    __put_user(stat->nlink, &buffer->stx_nlink) ||
	    __put_user(uid, &buffer->stx_uid) ||
	    __put_user(gid, &buffer->stx_gid) ||
	    __put_user(stat->attributes, &buffer->stx_attributes) ||
	    __put_user(stat->blksize, &buffer->stx_blksize) ||
	    __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major) ||
	    __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor) ||
	    __put_user(MAJOR(stat->dev), &buffer->stx_dev_major) ||
	    __put_user(MINOR(stat->dev), &buffer->stx_dev_minor) ||
	    __put_timestamp(&stat->atime, &buffer->stx_atime) ||
	    __put_timestamp(&stat->btime, &buffer->stx_btime) ||
	    __put_timestamp(&stat->ctime, &buffer->stx_ctime) ||
	    __put_timestamp(&stat->mtime, &buffer->stx_mtime) ||
	    __put_user(stat->ino, &buffer->stx_ino) ||
	    __put_user(stat->size, &buffer->stx_size) ||
	    __put_user(stat->blocks, &buffer->stx_blocks) ||
	    __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) ||
	    __clear_user(&buffer->__spare2, sizeof(buffer->__spare2)))
		return -EFAULT;

	return 0;
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat *or* NULL.
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that if filename is NULL, then it does the equivalent of fstat() using
 * dfd to indicate the file of interest.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	struct kstat stat;
	int error;

	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
		return -EFAULT;

	if (filename)
		error = vfs_statx(dfd, filename, flags, &stat, mask);
	else
		error = vfs_statx_fd(dfd, &stat, mask, flags);
	if (error)
		return error;
	return statx_set_result(&stat, buffer);
}
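
/*
 * Illustrative userspace sketch (not part of this file), invoking statx(2)
 * directly through syscall(2) where no libc wrapper is available; assumes
 * <linux/stat.h>, <fcntl.h>, <sys/syscall.h> and a __NR_statx definition:
 *
 *	struct statx stx;
 *	long ret = syscall(__NR_statx, AT_FDCWD, "/etc/hostname",
 *			   AT_STATX_SYNC_AS_STAT,
 *			   STATX_BASIC_STATS | STATX_BTIME, &stx);
 *	if (ret == 0 && (stx.stx_mask & STATX_BTIME))
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 *
 * stx_mask must be checked because the filesystem may not supply every
 * requested field.
 */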

/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
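
/*
 * i_blocks counts 512-byte units and i_bytes the sub-block remainder, so
 * the pair together encode an exact byte count.  For example, adding 1300
 * bytes to a fresh inode gives i_blocks == 2 and i_bytes == 276; adding a
 * further 300 bytes overflows the remainder (576 >= 512), so i_blocks
 * becomes 3 and i_bytes 64.
 */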

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);