/*
 * linux/fs/stat.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

/**
 * generic_fillattr - fill in the basic attributes from the inode struct
 * @inode: inode to use as the source
 * @stat: structure to fill in
 *
 * Fill in the basic fields of the kstat structure from data held in the
 * VFS inode.  This is the default used when a filesystem supplies no
 * ->getattr inode operation of its own.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
        stat->dev = inode->i_sb->s_dev;
        stat->ino = inode->i_ino;
        stat->mode = inode->i_mode;
        stat->nlink = inode->i_nlink;
        stat->uid = inode->i_uid;
        stat->gid = inode->i_gid;
        stat->rdev = inode->i_rdev;
        stat->size = i_size_read(inode);
        stat->atime = inode->i_atime;
        stat->mtime = inode->i_mtime;
        stat->ctime = inode->i_ctime;
        stat->blksize = i_blocksize(inode);
        stat->blocks = inode->i_blocks;
}

EXPORT_SYMBOL(generic_fillattr);

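/*
 * Illustrative sketch (not part of this file): how a filesystem's own
 * ->getattr typically builds on generic_fillattr(), filling in the common
 * fields first and then overriding whatever it tracks itself.  "foofs" and
 * foofs_file_size() are hypothetical names used only for this example.
 *
 *      static int foofs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 *                               struct kstat *stat)
 *      {
 *              struct inode *inode = d_inode(dentry);
 *
 *              generic_fillattr(inode, stat);
 *              stat->size = foofs_file_size(inode);    (fs-specific size)
 *              return 0;
 *      }
 */
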
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns
 * no attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(struct path *path, struct kstat *stat)
{
        struct inode *inode = d_backing_inode(path->dentry);

        if (inode->i_op->getattr)
                return inode->i_op->getattr(path->mnt, path->dentry, stat);

        generic_fillattr(inode, stat);
        return 0;
}

EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - getattr with security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 *
 * Ask the security module whether the caller may read the attributes at
 * all, then fill @stat via vfs_getattr_nosec().
 */
int vfs_getattr(struct path *path, struct kstat *stat)
{
        int retval;

        retval = security_inode_getattr(path);
        if (retval)
                return retval;
        return vfs_getattr_nosec(path, stat);
}

EXPORT_SYMBOL(vfs_getattr);

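/*
 * Illustrative sketch (not part of this file): a typical in-kernel caller
 * resolves a path, asks vfs_getattr() for the attributes and drops the
 * path reference again.  "/some/file" is a placeholder; the error handling
 * mirrors what the stat(2) family below does.
 *
 *      struct path path;
 *      struct kstat stat;
 *      int err;
 *
 *      err = kern_path("/some/file", LOOKUP_FOLLOW, &path);
 *      if (err)
 *              return err;
 *      err = vfs_getattr(&path, &stat);
 *      path_put(&path);
 *      return err;
 */
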
/*
 * vfs_fstat() uses a raw fd lookup, so O_PATH descriptors are accepted too.
 */
int vfs_fstat(unsigned int fd, struct kstat *stat)
{
        struct fd f = fdget_raw(fd);
        int error = -EBADF;

        if (f.file) {
                error = vfs_getattr(&f.file->f_path, stat);
                fdput(f);
        }
        return error;
}
EXPORT_SYMBOL(vfs_fstat);

int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
                int flag)
{
        struct path path;
        int error = -EINVAL;
        unsigned int lookup_flags = 0;

        if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
                      AT_EMPTY_PATH)) != 0)
                goto out;

        if (!(flag & AT_SYMLINK_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
        if (flag & AT_EMPTY_PATH)
                lookup_flags |= LOOKUP_EMPTY;
retry:
        error = user_path_at(dfd, filename, lookup_flags, &path);
        if (error)
                goto out;

        error = vfs_getattr(&path, stat);
        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
                /* Stale dentry (e.g. NFS): retry the lookup with forced revalidation. */
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
out:
        return error;
}
EXPORT_SYMBOL(vfs_fstatat);

int vfs_stat(const char __user *name, struct kstat *stat)
{
        return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);

int vfs_lstat(const char __user *name, struct kstat *stat)
{
        return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);


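/*
 * Illustrative sketch (not part of this file): the userspace view of the
 * three lookups above.  stat(2) follows symlinks, lstat(2) does not, and
 * fstatat(2) lets the caller choose via AT_SYMLINK_NOFOLLOW.  "/tmp/link"
 * is a hypothetical path used only for this example.
 *
 *      #include <sys/stat.h>
 *      #include <fcntl.h>
 *
 *      struct stat st;
 *
 *      stat("/tmp/link", &st);                         (follows the symlink)
 *      lstat("/tmp/link", &st);                        (stats the link itself)
 *      fstatat(AT_FDCWD, "/tmp/link", &st,
 *              AT_SYMLINK_NOFOLLOW);                   (same as lstat)
 */
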
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
        static int warncount = 5;
        struct __old_kernel_stat tmp;

        if (warncount > 0) {
                warncount--;
                printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
                        current->comm);
        } else if (warncount < 0) {
                /* it's laughable, but... */
                warncount = 0;
        }

        memset(&tmp, 0, sizeof(struct __old_kernel_stat));
        tmp.st_dev = old_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
                struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_stat(filename, &stat);
        if (error)
                return error;

        return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
                struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;

        return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_old_stat(&stat, statbuf);

        return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

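/*
 * Expansion example (for reference only): on a 32-bit build valid_dev(d)
 * becomes old_valid_dev(d) and encode_dev(d) becomes old_encode_dev(d);
 * on a 64-bit build they become true and new_encode_dev(d) respectively,
 * so every device number is representable there.
 */
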
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
        struct stat tmp;

        if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
                return -EOVERFLOW;
#if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif

        INIT_STRUCT_STAT_PADDING(tmp);
        tmp.st_dev = encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

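/*
 * Worked example of the overflow checks above (for reference only): with a
 * 32-bit struct stat, a regular file larger than MAX_NON_LFS (2 GiB - 1)
 * cannot be represented, so a non-LFS stat(2) on it fails with -EOVERFLOW;
 * userspace needs _FILE_OFFSET_BITS=64 (or the *64 syscalls further down)
 * to see such files.
 */
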
SYSCALL_DEFINE2(newstat, const char __user *, filename,
                struct stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (error)
                return error;
        return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
                struct stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;

        return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
                struct stat __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_new_stat(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
                char __user *, buf, int, bufsiz)
{
        struct path path;
        int error;
        int empty = 0;
        unsigned int lookup_flags = LOOKUP_EMPTY;

        if (bufsiz <= 0)
                return -EINVAL;

retry:
        error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
        if (!error) {
                struct inode *inode = d_backing_inode(path.dentry);

                error = empty ? -ENOENT : -EINVAL;
                /*
                 * AFS mountpoints allow readlink(2) but are not symlinks
                 */
                if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
                        error = security_inode_readlink(path.dentry);
                        if (!error) {
                                touch_atime(&path);
                                error = vfs_readlink(path.dentry, buf, bufsiz);
                        }
                }
                path_put(&path);
                if (retry_estale(error, lookup_flags)) {
                        lookup_flags |= LOOKUP_REVAL;
                        goto retry;
                }
        }
        return error;
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
                int, bufsiz)
{
        return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}

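/*
 * Illustrative sketch (not part of this file): the userspace contract of
 * readlinkat(2) as implemented above.  The kernel copies at most bufsiz
 * bytes and does not NUL-terminate the result, so callers terminate it
 * themselves using the returned length.  "/tmp/link" is a hypothetical
 * path used only for this example.
 *
 *      char target[PATH_MAX];
 *      ssize_t len;
 *
 *      len = readlinkat(AT_FDCWD, "/tmp/link", target, sizeof(target) - 1);
 *      if (len >= 0)
 *              target[len] = '\0';
 */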

/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
        struct stat64 tmp;

        INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
        /* mips has weird padding, so we don't get 64 bits there */
        tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_rdev = new_encode_dev(stat->rdev);
#else
        tmp.st_dev = huge_encode_dev(stat->dev);
        tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
        tmp.__st_ino = stat->ino;
#endif
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
        tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime = stat->ctime.tv_sec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
        tmp.st_size = stat->size;
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_lstat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
                struct stat64 __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

/* The caller is responsible for adequate locking (i.e. holding inode->i_lock). */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
        inode->i_blocks += bytes >> 9;
        bytes &= 511;
        inode->i_bytes += bytes;
        if (inode->i_bytes >= 512) {
                inode->i_blocks++;
                inode->i_bytes -= 512;
        }
}

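/*
 * Worked example (for reference only): the byte count is kept as whole
 * 512-byte blocks in i_blocks plus a 0..511 remainder in i_bytes.  Adding
 * 1300 bytes to an inode with i_bytes == 300 adds 2 to i_blocks (1300 >> 9)
 * and 276 to i_bytes (1300 & 511); the sum 576 then carries, so i_blocks
 * gains one more and i_bytes ends up at 64.
 */
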
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_add_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
                /* borrow a block's worth of bytes before subtracting */
                inode->i_blocks--;
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
        loff_t ret;

        spin_lock(&inode->i_lock);
        ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
        spin_unlock(&inode->i_lock);
        return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
        /*
         * The caller is responsible for adequate locking
         * (i.e. holding inode->i_lock).
         */
        inode->i_blocks = bytes >> 9;
        inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);