/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

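/**
 * generic_fillattr - fill in the basic attributes from the inode struct
 * @inode: inode to use
 * @stat: structure to return attributes in
 *
 * Fill in the basic attributes in the kstat structure from data that is
 * already kept in the generic VFS inode fields.  Filesystems that store
 * all of their attribute data there can use this directly or call it
 * from their own ->getattr() and then override individual fields.
 */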
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = (1 << inode->i_blkbits);
	stat->blocks = inode->i_blocks;
}

EXPORT_SYMBOL(generic_fillattr);

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns
 * no attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(struct path *path, struct kstat *stat)
{
	struct inode *inode = d_backing_inode(path->dentry);

	if (inode->i_op->getattr)
		return inode->i_op->getattr(path->mnt, path->dentry, stat);

	generic_fillattr(inode, stat);
	return 0;
}

EXPORT_SYMBOL(vfs_getattr_nosec);

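/*
 * vfs_getattr - get attributes of a file, with security checks.
 * Asks the LSM layer (security_inode_getattr) whether the caller may see
 * the attributes, then fills @stat via vfs_getattr_nosec().
 */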
int vfs_getattr(struct path *path, struct kstat *stat)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat);
}

EXPORT_SYMBOL(vfs_getattr);

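/*
 * vfs_fstat - get attributes of the file behind a file descriptor.
 * Resolves @fd with fdget_raw() (so O_PATH descriptors work as well),
 * fetches the attributes with vfs_getattr(), and returns -EBADF for an
 * invalid descriptor.
 */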
int vfs_fstat(unsigned int fd, struct kstat *stat)
{
	struct fd f = fdget_raw(fd);
	int error = -EBADF;

	if (f.file) {
		error = vfs_getattr(&f.file->f_path, stat);
		fdput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_fstat);

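/*
 * vfs_fstatat - stat a path relative to the directory given by @dfd.
 * Only AT_SYMLINK_NOFOLLOW, AT_NO_AUTOMOUNT and AT_EMPTY_PATH are
 * accepted in @flag; anything else returns -EINVAL.  A stale NFS handle
 * (-ESTALE) causes one retry of the lookup with LOOKUP_REVAL set.
 *
 * Example caller (sketch, not from this file):
 *
 *	struct kstat st;
 *	int err = vfs_fstatat(AT_FDCWD, filename, &st, AT_SYMLINK_NOFOLLOW);
 */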
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
		int flag)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = 0;

	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		      AT_EMPTY_PATH)) != 0)
		goto out;

	if (!(flag & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (flag & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;
retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_fstatat);

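/*
 * vfs_stat/vfs_lstat - convenience wrappers around vfs_fstatat() for
 * plain pathnames relative to the current working directory; vfs_lstat
 * passes AT_SYMLINK_NOFOLLOW so a trailing symlink is not followed.
 */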
int vfs_stat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);

int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
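/*
 * cp_old_stat copies a kstat into the legacy 16-bit-dev __old_kernel_stat
 * layout.  Inode numbers, link counts and (on 32-bit) file sizes above
 * MAX_NON_LFS that do not fit are reported as -EOVERFLOW rather than
 * silently truncated, and the first few users of the legacy syscalls get
 * a printk warning.
 */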
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

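/*
 * choose_32_64(a, b) expands to @a on 32-bit kernels and @b on 64-bit
 * ones.  It selects the old 16-bit device-number encoding (and its
 * validity check) on 32-bit, and the new 32-bit encoding on 64-bit, for
 * the "new" stat family below.
 */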
#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

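/*
 * cp_new_stat - copy a kstat into the userspace "new" struct stat.
 * Device numbers, inode numbers, link counts and (on 32-bit kernels)
 * file sizes beyond MAX_NON_LFS that cannot be represented make the
 * syscall fail with -EOVERFLOW instead of returning truncated values.
 */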
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}

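/*
 * readlinkat(2)/readlink(2) - read the target of a symbolic link into a
 * user buffer.  Non-symlinks are rejected with -EINVAL unless the inode
 * provides ->readlink (e.g. AFS mountpoints), an empty pathname is
 * normally reported as -ENOENT, and a successful read touches the link's
 * atime.  As above, a stale NFS handle causes one retry with LOOKUP_REVAL.
 */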
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

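/*
 * cp_new_stat64 - copy a kstat into the userspace struct stat64 used by
 * 32-bit ABIs for large-file support.  Device numbers use the "huge"
 * encoding except on MIPS, whose stat64 layout cannot hold them, and
 * inode numbers that do not fit in st_ino yield -EOVERFLOW.
 */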
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

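/*
 * The helpers below maintain an inode's space usage as a pair of fields:
 * i_blocks counts whole 512-byte blocks and i_bytes holds the remainder
 * (0-511), so i_blocks * 512 + i_bytes is the total charged to the inode.
 */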
/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

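/*
 * __inode_sub_bytes - subtract @bytes from the inode's space usage,
 * borrowing one 512-byte block when the sub-block remainder would go
 * negative.  Like __inode_add_bytes(), the caller must hold i_lock;
 * inode_sub_bytes() below is the locked wrapper.
 */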
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

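/*
 * inode_get_bytes/inode_set_bytes - read or set the total byte usage,
 * recombined from (or split into) the i_blocks/i_bytes pair.  The getter
 * takes i_lock itself; the setter relies on the caller's locking, as
 * noted in its body.
 */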
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);