1 /*
2 * linux/fs/read_write.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/slab.h>
8 #include <linux/stat.h>
9 #include <linux/fcntl.h>
10 #include <linux/file.h>
11 #include <linux/uio.h>
12 #include <linux/aio.h>
13 #include <linux/fsnotify.h>
14 #include <linux/security.h>
15 #include <linux/export.h>
16 #include <linux/syscalls.h>
17 #include <linux/pagemap.h>
18 #include <linux/splice.h>
19 #include <linux/compat.h>
20 #include "internal.h"
21
22 #include <asm/uaccess.h>
23 #include <asm/unistd.h>
24
25 typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
26 typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
27 unsigned long, loff_t);
28
29 const struct file_operations generic_ro_fops = {
30 .llseek = generic_file_llseek,
31 .read = do_sync_read,
32 .aio_read = generic_file_aio_read,
33 .mmap = generic_file_readonly_mmap,
34 .splice_read = generic_file_splice_read,
35 };
36
37 EXPORT_SYMBOL(generic_ro_fops);
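
/*
 * Illustrative sketch, not part of this file: a read-only filesystem can
 * point its regular-file inodes at generic_ro_fops while reading inodes in,
 * provided it supplies address_space_operations for the page cache.  The
 * myfs_* names below are hypothetical; only generic_ro_fops is real.
 */
#include <linux/fs.h>

extern const struct address_space_operations myfs_aops;	/* hypothetical */

static void myfs_init_reg_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_fop = &generic_ro_fops;	/* read/llseek/mmap/splice */
		inode->i_mapping->a_ops = &myfs_aops;	/* must provide ->readpage */
	}
}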
38
39 static inline int unsigned_offsets(struct file *file)
40 {
41 return file->f_mode & FMODE_UNSIGNED_OFFSET;
42 }
43
44 static loff_t lseek_execute(struct file *file, struct inode *inode,
45 loff_t offset, loff_t maxsize)
46 {
47 if (offset < 0 && !unsigned_offsets(file))
48 return -EINVAL;
49 if (offset > maxsize)
50 return -EINVAL;
51
52 if (offset != file->f_pos) {
53 file->f_pos = offset;
54 file->f_version = 0;
55 }
56 return offset;
57 }
58
59 /**
60 * generic_file_llseek_size - generic llseek implementation for regular files
61 * @file: file structure to seek on
62 * @offset: file offset to seek to
63 * @whence: type of seek
64 * @maxsize: max size of this file in the file system
65 * @eof: offset used for SEEK_END position
66 *
67 * This is a variant of generic_file_llseek that allows passing in a custom
68 * maximum file size and a custom EOF position, e.g. for hashed directories
69 *
70 * Synchronization:
71 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
72 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
73 * read/writes behave like SEEK_SET against seeks.
74 */
75 loff_t
76 generic_file_llseek_size(struct file *file, loff_t offset, int whence,
77 loff_t maxsize, loff_t eof)
78 {
79 struct inode *inode = file->f_mapping->host;
80
81 switch (whence) {
82 case SEEK_END:
83 offset += eof;
84 break;
85 case SEEK_CUR:
86 /*
87 * Here we special-case the lseek(fd, 0, SEEK_CUR)
88 * position-querying operation. Avoid rewriting the "same"
89 * f_pos value back to the file because a concurrent read(),
90 * write() or lseek() might have altered it
91 */
92 if (offset == 0)
93 return file->f_pos;
94 /*
95 * f_lock protects against read/modify/write race with other
96 * SEEK_CURs. Note that parallel writes and reads behave
97 * like SEEK_SET.
98 */
99 spin_lock(&file->f_lock);
100 offset = lseek_execute(file, inode, file->f_pos + offset,
101 maxsize);
102 spin_unlock(&file->f_lock);
103 return offset;
104 case SEEK_DATA:
105 /*
106 * In the generic case the entire file is data, so as long as
107 * offset isn't at the end of the file then the offset is data.
108 */
109 if (offset >= eof)
110 return -ENXIO;
111 break;
112 case SEEK_HOLE:
113 /*
114 * There is a virtual hole at the end of the file, so as long as
115 * offset isn't i_size or larger, return i_size.
116 */
117 if (offset >= eof)
118 return -ENXIO;
119 offset = eof;
120 break;
121 }
122
123 return lseek_execute(file, inode, offset, maxsize);
124 }
125 EXPORT_SYMBOL(generic_file_llseek_size);
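
/*
 * Illustrative sketch, not part of this file: a filesystem whose directory
 * offsets are synthetic hash cookies can still reuse the helper above by
 * passing its own limits.  myfs_dir_llseek() and MYFS_MAX_HASH are
 * hypothetical; only generic_file_llseek_size() is real.
 */
#include <linux/fs.h>

#define MYFS_MAX_HASH	0x7fffffffULL	/* hypothetical upper bound for hash cookies */

static loff_t myfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	/* Both the maximum seekable offset and the SEEK_END target are custom. */
	return generic_file_llseek_size(file, offset, whence,
					MYFS_MAX_HASH, MYFS_MAX_HASH);
}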
126
127 /**
128 * generic_file_llseek - generic llseek implementation for regular files
129 * @file: file structure to seek on
130 * @offset: file offset to seek to
131 * @whence: type of seek
132 *
133 * This is a generic implementation of ->llseek usable for all normal local
134 * filesystems. It just updates the file offset to the value specified by
135 * @offset and @whence.
136 */
137 loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
138 {
139 struct inode *inode = file->f_mapping->host;
140
141 return generic_file_llseek_size(file, offset, whence,
142 inode->i_sb->s_maxbytes,
143 i_size_read(inode));
144 }
145 EXPORT_SYMBOL(generic_file_llseek);
146
147 /**
148 * fixed_size_llseek - llseek implementation for fixed-sized devices
149 * @file: file structure to seek on
150 * @offset: file offset to seek to
151 * @whence: type of seek
152 * @size: size of the file
153 *
154 */
155 loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
156 {
157 switch (whence) {
158 case SEEK_SET: case SEEK_CUR: case SEEK_END:
159 return generic_file_llseek_size(file, offset, whence,
160 size, size);
161 default:
162 return -EINVAL;
163 }
164 }
165 EXPORT_SYMBOL(fixed_size_llseek);
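
/*
 * Illustrative sketch, not part of this file: fixed_size_llseek() takes the
 * device size as a fourth argument, so it cannot be plugged into
 * file_operations directly; a driver wraps it with its own size.  The
 * mydev_* names and MYDEV_SIZE are hypothetical.
 */
#include <linux/fs.h>
#include <linux/module.h>

#define MYDEV_SIZE	(4 * 1024 * 1024)	/* hypothetical fixed device size */

static loff_t mydev_llseek(struct file *file, loff_t offset, int whence)
{
	return fixed_size_llseek(file, offset, whence, MYDEV_SIZE);
}

static const struct file_operations mydev_fops = {
	.owner	= THIS_MODULE,
	.llseek	= mydev_llseek,
	/* .read / .write supplied by the driver */
};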
166
167 /**
168 * noop_llseek - No Operation Performed llseek implementation
169 * @file: file structure to seek on
170 * @offset: file offset to seek to
171 * @whence: type of seek
172 *
173 * This is an implementation of ->llseek usable for the rare special case when
174 * userspace expects the seek to succeed but the (device) file is actually not
175 * able to perform the seek. In this case you use noop_llseek() instead of
176 * falling back to the default implementation of ->llseek.
177 */
178 loff_t noop_llseek(struct file *file, loff_t offset, int whence)
179 {
180 return file->f_pos;
181 }
182 EXPORT_SYMBOL(noop_llseek);
183
184 loff_t no_llseek(struct file *file, loff_t offset, int whence)
185 {
186 return -ESPIPE;
187 }
188 EXPORT_SYMBOL(no_llseek);
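
/*
 * Illustrative sketch, not part of this file: the choice between the two
 * helpers above.  noop_llseek() "succeeds" without moving f_pos, which keeps
 * old userspace working on devices that ignore the position; no_llseek()
 * refuses with -ESPIPE for genuinely unseekable, stream-like files.  The
 * fops names below are hypothetical.
 */
static const struct file_operations mychar_stream_fops = {
	.llseek	= no_llseek,	/* lseek(2) fails with ESPIPE */
	/* ... */
};

static const struct file_operations mychar_legacy_fops = {
	.llseek	= noop_llseek,	/* lseek(2) reports the current position, moves nothing */
	/* ... */
};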
189
190 loff_t default_llseek(struct file *file, loff_t offset, int whence)
191 {
192 struct inode *inode = file_inode(file);
193 loff_t retval;
194
195 mutex_lock(&inode->i_mutex);
196 switch (whence) {
197 case SEEK_END:
198 offset += i_size_read(inode);
199 break;
200 case SEEK_CUR:
201 if (offset == 0) {
202 retval = file->f_pos;
203 goto out;
204 }
205 offset += file->f_pos;
206 break;
207 case SEEK_DATA:
208 /*
209 * In the generic case the entire file is data, so as
210 * long as offset isn't at the end of the file then the
211 * offset is data.
212 */
213 if (offset >= inode->i_size) {
214 retval = -ENXIO;
215 goto out;
216 }
217 break;
218 case SEEK_HOLE:
219 /*
220 * There is a virtual hole at the end of the file, so
221 * as long as offset isn't i_size or larger, return
222 * i_size.
223 */
224 if (offset >= inode->i_size) {
225 retval = -ENXIO;
226 goto out;
227 }
228 offset = inode->i_size;
229 break;
230 }
231 retval = -EINVAL;
232 if (offset >= 0 || unsigned_offsets(file)) {
233 if (offset != file->f_pos) {
234 file->f_pos = offset;
235 file->f_version = 0;
236 }
237 retval = offset;
238 }
239 out:
240 mutex_unlock(&inode->i_mutex);
241 return retval;
242 }
243 EXPORT_SYMBOL(default_llseek);
244
245 loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
246 {
247 loff_t (*fn)(struct file *, loff_t, int);
248
249 fn = no_llseek;
250 if (file->f_mode & FMODE_LSEEK) {
251 if (file->f_op && file->f_op->llseek)
252 fn = file->f_op->llseek;
253 }
254 return fn(file, offset, whence);
255 }
256 EXPORT_SYMBOL(vfs_llseek);
257
258 SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
259 {
260 off_t retval;
261 struct fd f = fdget(fd);
262 if (!f.file)
263 return -EBADF;
264
265 retval = -EINVAL;
266 if (whence <= SEEK_MAX) {
267 loff_t res = vfs_llseek(f.file, offset, whence);
268 retval = res;
269 if (res != (loff_t)retval)
270 retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
271 }
272 fdput(f);
273 return retval;
274 }
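
/*
 * Userspace view, for illustration only: lseek(fd, 0, SEEK_CUR) is the cheap
 * "where am I?" query special-cased above, and SEEK_DATA/SEEK_HOLE let a
 * program walk the allocated extents of a sparse file (filesystems relying on
 * the generic helpers above report the whole file as a single data extent).
 * Assumes a glibc-style environment.
 */
#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
#include <unistd.h>
#include <stdio.h>

static void show_layout(int fd)
{
	off_t pos  = lseek(fd, 0, SEEK_CUR);	/* current file position */
	off_t data = lseek(fd, 0, SEEK_DATA);	/* first data byte at/after 0 */

	if (data < 0)				/* ENXIO: no data (empty file) */
		return;
	printf("pos=%lld first-data=%lld data-end=%lld\n",
	       (long long)pos, (long long)data,
	       (long long)lseek(fd, data, SEEK_HOLE));
}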
275
276 #ifdef CONFIG_COMPAT
277 COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
278 {
279 return sys_lseek(fd, offset, whence);
280 }
281 #endif
282
283 #ifdef __ARCH_WANT_SYS_LLSEEK
284 SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
285 unsigned long, offset_low, loff_t __user *, result,
286 unsigned int, whence)
287 {
288 int retval;
289 struct fd f = fdget(fd);
290 loff_t offset;
291
292 if (!f.file)
293 return -EBADF;
294
295 retval = -EINVAL;
296 if (whence > SEEK_MAX)
297 goto out_putf;
298
299 offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
300 whence);
301
302 retval = (int)offset;
303 if (offset >= 0) {
304 retval = -EFAULT;
305 if (!copy_to_user(result, &offset, sizeof(offset)))
306 retval = 0;
307 }
308 out_putf:
309 fdput(f);
310 return retval;
311 }
312 #endif
313
314 /*
315 * rw_verify_area doesn't like huge counts. We limit
316 * them to something that fits in "int" so that others
317 * won't have to do range checks all the time.
318 */
319 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
320 {
321 struct inode *inode;
322 loff_t pos;
323 int retval = -EINVAL;
324
325 inode = file_inode(file);
326 if (unlikely((ssize_t) count < 0))
327 return retval;
328 pos = *ppos;
329 if (unlikely(pos < 0)) {
330 if (!unsigned_offsets(file))
331 return retval;
332 if (count >= -pos) /* both values are in 0..LLONG_MAX */
333 return -EOVERFLOW;
334 } else if (unlikely((loff_t) (pos + count) < 0)) {
335 if (!unsigned_offsets(file))
336 return retval;
337 }
338
339 if (unlikely(inode->i_flock && mandatory_lock(inode))) {
340 retval = locks_mandatory_area(
341 read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
342 inode, file, pos, count);
343 if (retval < 0)
344 return retval;
345 }
346 retval = security_file_permission(file,
347 read_write == READ ? MAY_READ : MAY_WRITE);
348 if (retval)
349 return retval;
350 return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
351 }
352
353 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
354 {
355 struct iovec iov = { .iov_base = buf, .iov_len = len };
356 struct kiocb kiocb;
357 ssize_t ret;
358
359 init_sync_kiocb(&kiocb, filp);
360 kiocb.ki_pos = *ppos;
361 kiocb.ki_left = len;
362 kiocb.ki_nbytes = len;
363
364 ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
365 if (-EIOCBQUEUED == ret)
366 ret = wait_on_sync_kiocb(&kiocb);
367 *ppos = kiocb.ki_pos;
368 return ret;
369 }
370
371 EXPORT_SYMBOL(do_sync_read);
372
373 ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
374 {
375 ssize_t ret;
376
377 if (!(file->f_mode & FMODE_READ))
378 return -EBADF;
379 if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
380 return -EINVAL;
381 if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
382 return -EFAULT;
383
384 ret = rw_verify_area(READ, file, pos, count);
385 if (ret >= 0) {
386 count = ret;
387 if (file->f_op->read)
388 ret = file->f_op->read(file, buf, count, pos);
389 else
390 ret = do_sync_read(file, buf, count, pos);
391 if (ret > 0) {
392 fsnotify_access(file);
393 add_rchar(current, ret);
394 }
395 inc_syscr(current);
396 }
397
398 return ret;
399 }
400
401 EXPORT_SYMBOL(vfs_read);
402
403 ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
404 {
405 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
406 struct kiocb kiocb;
407 ssize_t ret;
408
409 init_sync_kiocb(&kiocb, filp);
410 kiocb.ki_pos = *ppos;
411 kiocb.ki_left = len;
412 kiocb.ki_nbytes = len;
413
414 ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
415 if (-EIOCBQUEUED == ret)
416 ret = wait_on_sync_kiocb(&kiocb);
417 *ppos = kiocb.ki_pos;
418 return ret;
419 }
420
421 EXPORT_SYMBOL(do_sync_write);
422
423 ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
424 {
425 mm_segment_t old_fs;
426 const char __user *p;
427 ssize_t ret;
428
429 if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
430 return -EINVAL;
431
432 old_fs = get_fs();
433 set_fs(get_ds());
434 p = (__force const char __user *)buf;
435 if (count > MAX_RW_COUNT)
436 count = MAX_RW_COUNT;
437 if (file->f_op->write)
438 ret = file->f_op->write(file, p, count, pos);
439 else
440 ret = do_sync_write(file, p, count, pos);
441 set_fs(old_fs);
442 if (ret > 0) {
443 fsnotify_modify(file);
444 add_wchar(current, ret);
445 }
446 inc_syscw(current);
447 return ret;
448 }
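
/*
 * Illustrative sketch, not part of this file: reading into a kernel buffer
 * follows the same set_fs() pattern as __kernel_write() above (the tree's
 * actual read-side helper lives elsewhere).  my_kernel_read() is a
 * hypothetical name.
 */
#include <linux/fs.h>
#include <asm/uaccess.h>

static ssize_t my_kernel_read(struct file *file, char *buf, size_t count,
			      loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(get_ds());	/* let vfs_read() accept a kernel pointer */
	ret = vfs_read(file, (__force char __user *)buf, count, pos);
	set_fs(old_fs);
	return ret;
}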
449
450 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
451 {
452 ssize_t ret;
453
454 if (!(file->f_mode & FMODE_WRITE))
455 return -EBADF;
456 if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
457 return -EINVAL;
458 if (unlikely(!access_ok(VERIFY_READ, buf, count)))
459 return -EFAULT;
460
461 ret = rw_verify_area(WRITE, file, pos, count);
462 if (ret >= 0) {
463 count = ret;
464 file_start_write(file);
465 if (file->f_op->write)
466 ret = file->f_op->write(file, buf, count, pos);
467 else
468 ret = do_sync_write(file, buf, count, pos);
469 if (ret > 0) {
470 fsnotify_modify(file);
471 add_wchar(current, ret);
472 }
473 inc_syscw(current);
474 file_end_write(file);
475 }
476
477 return ret;
478 }
479
480 EXPORT_SYMBOL(vfs_write);
481
482 static inline loff_t file_pos_read(struct file *file)
483 {
484 return file->f_pos;
485 }
486
487 static inline void file_pos_write(struct file *file, loff_t pos)
488 {
489 file->f_pos = pos;
490 }
491
492 SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
493 {
494 struct fd f = fdget(fd);
495 ssize_t ret = -EBADF;
496
497 if (f.file) {
498 loff_t pos = file_pos_read(f.file);
499 ret = vfs_read(f.file, buf, count, &pos);
500 if (ret >= 0)
501 file_pos_write(f.file, pos);
502 fdput(f);
503 }
504 return ret;
505 }
506
507 SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
508 size_t, count)
509 {
510 struct fd f = fdget(fd);
511 ssize_t ret = -EBADF;
512
513 if (f.file) {
514 loff_t pos = file_pos_read(f.file);
515 ret = vfs_write(f.file, buf, count, &pos);
516 if (ret >= 0)
517 file_pos_write(f.file, pos);
518 fdput(f);
519 }
520
521 return ret;
522 }
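
/*
 * Userspace view, for illustration only: write(2) (and vfs_write() above) may
 * legitimately transfer fewer bytes than requested, so portable callers loop
 * over short writes and retry on EINTR.
 */
#include <unistd.h>
#include <errno.h>

static int write_all(int fd, const char *buf, size_t len)
{
	while (len > 0) {
		ssize_t n = write(fd, buf, len);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted, nothing written */
			return -1;		/* real error, errno is set */
		}
		buf += n;
		len -= n;
	}
	return 0;
}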
523
524 SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
525 size_t, count, loff_t, pos)
526 {
527 struct fd f;
528 ssize_t ret = -EBADF;
529
530 if (pos < 0)
531 return -EINVAL;
532
533 f = fdget(fd);
534 if (f.file) {
535 ret = -ESPIPE;
536 if (f.file->f_mode & FMODE_PREAD)
537 ret = vfs_read(f.file, buf, count, &pos);
538 fdput(f);
539 }
540
541 return ret;
542 }
543
544 SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
545 size_t, count, loff_t, pos)
546 {
547 struct fd f;
548 ssize_t ret = -EBADF;
549
550 if (pos < 0)
551 return -EINVAL;
552
553 f = fdget(fd);
554 if (f.file) {
555 ret = -ESPIPE;
556 if (f.file->f_mode & FMODE_PWRITE)
557 ret = vfs_write(f.file, buf, count, &pos);
558 fdput(f);
559 }
560
561 return ret;
562 }
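
/*
 * Userspace view, for illustration only: pread(2)/pwrite(2) take an explicit
 * offset and never touch the shared file position, which is why the syscalls
 * above use a local 'pos' instead of f_pos.  On a pipe or socket (no
 * FMODE_PREAD/FMODE_PWRITE) they fail with ESPIPE.
 */
#include <unistd.h>

/* Read one fixed-size record without disturbing other threads' file position. */
static ssize_t read_record(int fd, void *rec, size_t rec_size, unsigned long idx)
{
	return pread(fd, rec, rec_size, (off_t)idx * rec_size);
}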
563
564 /*
565 * Reduce an iovec's length in-place. Return the resulting number of segments.
566 */
567 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
568 {
569 unsigned long seg = 0;
570 size_t len = 0;
571
572 while (seg < nr_segs) {
573 seg++;
574 if (len + iov->iov_len >= to) {
575 iov->iov_len = to - len;
576 break;
577 }
578 len += iov->iov_len;
579 iov++;
580 }
581 return seg;
582 }
583 EXPORT_SYMBOL(iov_shorten);
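
/*
 * Illustrative sketch, not part of this file: a caller that has already
 * capped its byte count (for example to the value returned by
 * rw_verify_area()) can trim an iovec array in place so the segments
 * describe no more than that budget.  The names below are local to the
 * example.
 */
#include <linux/uio.h>

static unsigned long trim_iov_to_budget(struct iovec *iov,
					unsigned long nr_segs, size_t budget)
{
	/* Returns how many of the segments are still in use afterwards. */
	return iov_shorten(iov, nr_segs, budget);
}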
584
585 static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
586 unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
587 {
588 struct kiocb kiocb;
589 ssize_t ret;
590
591 init_sync_kiocb(&kiocb, filp);
592 kiocb.ki_pos = *ppos;
593 kiocb.ki_left = len;
594 kiocb.ki_nbytes = len;
595
596 ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
597 if (ret == -EIOCBQUEUED)
598 ret = wait_on_sync_kiocb(&kiocb);
599 *ppos = kiocb.ki_pos;
600 return ret;
601 }
602
603 /* Do it by hand, with file-ops */
604 static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
605 unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
606 {
607 struct iovec *vector = iov;
608 ssize_t ret = 0;
609
610 while (nr_segs > 0) {
611 void __user *base;
612 size_t len;
613 ssize_t nr;
614
615 base = vector->iov_base;
616 len = vector->iov_len;
617 vector++;
618 nr_segs--;
619
620 nr = fn(filp, base, len, ppos);
621
622 if (nr < 0) {
623 if (!ret)
624 ret = nr;
625 break;
626 }
627 ret += nr;
628 if (nr != len)
629 break;
630 }
631
632 return ret;
633 }
634
635 /* A write operation does a read from user space and vice versa */
636 #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
637
638 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
639 unsigned long nr_segs, unsigned long fast_segs,
640 struct iovec *fast_pointer,
641 struct iovec **ret_pointer)
642 {
643 unsigned long seg;
644 ssize_t ret;
645 struct iovec *iov = fast_pointer;
646
647 /*
648 * SuS says "The readv() function *may* fail if the iovcnt argument
649 * was less than or equal to 0, or greater than {IOV_MAX}." Linux has
650 * traditionally returned zero for zero segments, so...
651 */
652 if (nr_segs == 0) {
653 ret = 0;
654 goto out;
655 }
656
657 /*
658 * First get the "struct iovec" from user memory and
659 * verify all the pointers
660 */
661 if (nr_segs > UIO_MAXIOV) {
662 ret = -EINVAL;
663 goto out;
664 }
665 if (nr_segs > fast_segs) {
666 iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
667 if (iov == NULL) {
668 ret = -ENOMEM;
669 goto out;
670 }
671 }
672 if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
673 ret = -EFAULT;
674 goto out;
675 }
676
677 /*
678 * According to the Single Unix Specification we should return EINVAL
679 * if an element length is < 0 when cast to ssize_t or if the
680 * total length would overflow the ssize_t return value of the
681 * system call.
682 *
683 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
684 * overflow case.
685 */
686 ret = 0;
687 for (seg = 0; seg < nr_segs; seg++) {
688 void __user *buf = iov[seg].iov_base;
689 ssize_t len = (ssize_t)iov[seg].iov_len;
690
691 /* see if we're about to use an invalid len or if
692 * it's about to overflow ssize_t */
693 if (len < 0) {
694 ret = -EINVAL;
695 goto out;
696 }
697 if (type >= 0
698 && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
699 ret = -EFAULT;
700 goto out;
701 }
702 if (len > MAX_RW_COUNT - ret) {
703 len = MAX_RW_COUNT - ret;
704 iov[seg].iov_len = len;
705 }
706 ret += len;
707 }
708 out:
709 *ret_pointer = iov;
710 return ret;
711 }
712
713 static ssize_t do_readv_writev(int type, struct file *file,
714 const struct iovec __user * uvector,
715 unsigned long nr_segs, loff_t *pos)
716 {
717 size_t tot_len;
718 struct iovec iovstack[UIO_FASTIOV];
719 struct iovec *iov = iovstack;
720 ssize_t ret;
721 io_fn_t fn;
722 iov_fn_t fnv;
723
724 if (!file->f_op) {
725 ret = -EINVAL;
726 goto out;
727 }
728
729 ret = rw_copy_check_uvector(type, uvector, nr_segs,
730 ARRAY_SIZE(iovstack), iovstack, &iov);
731 if (ret <= 0)
732 goto out;
733
734 tot_len = ret;
735 ret = rw_verify_area(type, file, pos, tot_len);
736 if (ret < 0)
737 goto out;
738
739 fnv = NULL;
740 if (type == READ) {
741 fn = file->f_op->read;
742 fnv = file->f_op->aio_read;
743 } else {
744 fn = (io_fn_t)file->f_op->write;
745 fnv = file->f_op->aio_write;
746 file_start_write(file);
747 }
748
749 if (fnv)
750 ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
751 pos, fnv);
752 else
753 ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
754
755 if (type != READ)
756 file_end_write(file);
757
758 out:
759 if (iov != iovstack)
760 kfree(iov);
761 if ((ret + (type == READ)) > 0) {
762 if (type == READ)
763 fsnotify_access(file);
764 else
765 fsnotify_modify(file);
766 }
767 return ret;
768 }
769
770 ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
771 unsigned long vlen, loff_t *pos)
772 {
773 if (!(file->f_mode & FMODE_READ))
774 return -EBADF;
775 if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
776 return -EINVAL;
777
778 return do_readv_writev(READ, file, vec, vlen, pos);
779 }
780
781 EXPORT_SYMBOL(vfs_readv);
782
783 ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
784 unsigned long vlen, loff_t *pos)
785 {
786 if (!(file->f_mode & FMODE_WRITE))
787 return -EBADF;
788 if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
789 return -EINVAL;
790
791 return do_readv_writev(WRITE, file, vec, vlen, pos);
792 }
793
794 EXPORT_SYMBOL(vfs_writev);
795
796 SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
797 unsigned long, vlen)
798 {
799 struct fd f = fdget(fd);
800 ssize_t ret = -EBADF;
801
802 if (f.file) {
803 loff_t pos = file_pos_read(f.file);
804 ret = vfs_readv(f.file, vec, vlen, &pos);
805 if (ret >= 0)
806 file_pos_write(f.file, pos);
807 fdput(f);
808 }
809
810 if (ret > 0)
811 add_rchar(current, ret);
812 inc_syscr(current);
813 return ret;
814 }
815
816 SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
817 unsigned long, vlen)
818 {
819 struct fd f = fdget(fd);
820 ssize_t ret = -EBADF;
821
822 if (f.file) {
823 loff_t pos = file_pos_read(f.file);
824 ret = vfs_writev(f.file, vec, vlen, &pos);
825 if (ret >= 0)
826 file_pos_write(f.file, pos);
827 fdput(f);
828 }
829
830 if (ret > 0)
831 add_wchar(current, ret);
832 inc_syscw(current);
833 return ret;
834 }
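
/*
 * Userspace view, for illustration only: writev(2) submits several buffers in
 * one system call and the kernel walks them through the iovec machinery
 * above, so a header and payload can go out without first being copied into
 * one contiguous buffer.
 */
#include <sys/uio.h>
#include <string.h>

static ssize_t send_framed(int fd, const char *hdr, const void *body,
			   size_t body_len)
{
	struct iovec iov[2] = {
		{ .iov_base = (void *)hdr,  .iov_len = strlen(hdr) },
		{ .iov_base = (void *)body, .iov_len = body_len },
	};

	return writev(fd, iov, 2);	/* may still be short; callers should check */
}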
835
836 static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
837 {
838 #define HALF_LONG_BITS (BITS_PER_LONG / 2)
839 return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
840 }
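
/*
 * Illustrative sketch, not part of this file: pos_from_hilo() concatenates
 * the two unsigned-long syscall arguments as if shifting by BITS_PER_LONG,
 * but uses two half-width shifts because a single 64-bit shift of a 64-bit
 * loff_t would be undefined on 64-bit platforms.  split_offset() below shows
 * the matching caller-side split and is hypothetical.
 */
static inline void split_offset(loff_t pos, unsigned long *low, unsigned long *high)
{
	*low  = (unsigned long)pos;	/* low BITS_PER_LONG bits */
	/* Mirror the double shift: yields pos >> 32 on 32-bit, 0 on 64-bit. */
	*high = (unsigned long)(((u64)pos >> HALF_LONG_BITS) >> HALF_LONG_BITS);
}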
841
842 SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
843 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
844 {
845 loff_t pos = pos_from_hilo(pos_h, pos_l);
846 struct fd f;
847 ssize_t ret = -EBADF;
848
849 if (pos < 0)
850 return -EINVAL;
851
852 f = fdget(fd);
853 if (f.file) {
854 ret = -ESPIPE;
855 if (f.file->f_mode & FMODE_PREAD)
856 ret = vfs_readv(f.file, vec, vlen, &pos);
857 fdput(f);
858 }
859
860 if (ret > 0)
861 add_rchar(current, ret);
862 inc_syscr(current);
863 return ret;
864 }
865
866 SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
867 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
868 {
869 loff_t pos = pos_from_hilo(pos_h, pos_l);
870 struct fd f;
871 ssize_t ret = -EBADF;
872
873 if (pos < 0)
874 return -EINVAL;
875
876 f = fdget(fd);
877 if (f.file) {
878 ret = -ESPIPE;
879 if (f.file->f_mode & FMODE_PWRITE)
880 ret = vfs_writev(f.file, vec, vlen, &pos);
881 fdput(f);
882 }
883
884 if (ret > 0)
885 add_wchar(current, ret);
886 inc_syscw(current);
887 return ret;
888 }
889
890 #ifdef CONFIG_COMPAT
891
892 static ssize_t compat_do_readv_writev(int type, struct file *file,
893 const struct compat_iovec __user *uvector,
894 unsigned long nr_segs, loff_t *pos)
895 {
896 compat_ssize_t tot_len;
897 struct iovec iovstack[UIO_FASTIOV];
898 struct iovec *iov = iovstack;
899 ssize_t ret;
900 io_fn_t fn;
901 iov_fn_t fnv;
902
903 ret = -EINVAL;
904 if (!file->f_op)
905 goto out;
906
907 ret = -EFAULT;
908 if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
909 goto out;
910
911 ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
912 UIO_FASTIOV, iovstack, &iov);
913 if (ret <= 0)
914 goto out;
915
916 tot_len = ret;
917 ret = rw_verify_area(type, file, pos, tot_len);
918 if (ret < 0)
919 goto out;
920
921 fnv = NULL;
922 if (type == READ) {
923 fn = file->f_op->read;
924 fnv = file->f_op->aio_read;
925 } else {
926 fn = (io_fn_t)file->f_op->write;
927 fnv = file->f_op->aio_write;
928 file_start_write(file);
929 }
930
931 if (fnv)
932 ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
933 pos, fnv);
934 else
935 ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
936
937 if (type != READ)
938 file_end_write(file);
939
940 out:
941 if (iov != iovstack)
942 kfree(iov);
943 if ((ret + (type == READ)) > 0) {
944 if (type == READ)
945 fsnotify_access(file);
946 else
947 fsnotify_modify(file);
948 }
949 return ret;
950 }
951
952 static ssize_t compat_readv(struct file *file,
953 const struct compat_iovec __user *vec,
954 unsigned long vlen, loff_t *pos)
955 {
956 ssize_t ret = -EBADF;
957
958 if (!(file->f_mode & FMODE_READ))
959 goto out;
960
961 ret = -EINVAL;
962 if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
963 goto out;
964
965 ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
966
967 out:
968 if (ret > 0)
969 add_rchar(current, ret);
970 inc_syscr(current);
971 return ret;
972 }
973
974 COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
975 const struct compat_iovec __user *,vec,
976 unsigned long, vlen)
977 {
978 struct fd f = fdget(fd);
979 ssize_t ret;
980 loff_t pos;
981
982 if (!f.file)
983 return -EBADF;
984 pos = f.file->f_pos;
985 ret = compat_readv(f.file, vec, vlen, &pos);
986 if (ret >= 0)
987 f.file->f_pos = pos;
988 fdput(f);
989 return ret;
990 }
991
992 COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
993 const struct compat_iovec __user *,vec,
994 unsigned long, vlen, loff_t, pos)
995 {
996 struct fd f;
997 ssize_t ret;
998
999 if (pos < 0)
1000 return -EINVAL;
1001 f = fdget(fd);
1002 if (!f.file)
1003 return -EBADF;
1004 ret = -ESPIPE;
1005 if (f.file->f_mode & FMODE_PREAD)
1006 ret = compat_readv(f.file, vec, vlen, &pos);
1007 fdput(f);
1008 return ret;
1009 }
1010
1011 COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
1012 const struct compat_iovec __user *,vec,
1013 unsigned long, vlen, u32, pos_low, u32, pos_high)
1014 {
1015 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1016 return compat_sys_preadv64(fd, vec, vlen, pos);
1017 }
1018
1019 static ssize_t compat_writev(struct file *file,
1020 const struct compat_iovec __user *vec,
1021 unsigned long vlen, loff_t *pos)
1022 {
1023 ssize_t ret = -EBADF;
1024
1025 if (!(file->f_mode & FMODE_WRITE))
1026 goto out;
1027
1028 ret = -EINVAL;
1029 if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
1030 goto out;
1031
1032 ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
1033
1034 out:
1035 if (ret > 0)
1036 add_wchar(current, ret);
1037 inc_syscw(current);
1038 return ret;
1039 }
1040
1041 COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
1042 const struct compat_iovec __user *, vec,
1043 unsigned long, vlen)
1044 {
1045 struct fd f = fdget(fd);
1046 ssize_t ret;
1047 loff_t pos;
1048
1049 if (!f.file)
1050 return -EBADF;
1051 pos = f.file->f_pos;
1052 ret = compat_writev(f.file, vec, vlen, &pos);
1053 if (ret >= 0)
1054 f.file->f_pos = pos;
1055 fdput(f);
1056 return ret;
1057 }
1058
1059 COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
1060 const struct compat_iovec __user *,vec,
1061 unsigned long, vlen, loff_t, pos)
1062 {
1063 struct fd f;
1064 ssize_t ret;
1065
1066 if (pos < 0)
1067 return -EINVAL;
1068 f = fdget(fd);
1069 if (!f.file)
1070 return -EBADF;
1071 ret = -ESPIPE;
1072 if (f.file->f_mode & FMODE_PWRITE)
1073 ret = compat_writev(f.file, vec, vlen, &pos);
1074 fdput(f);
1075 return ret;
1076 }
1077
1078 COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
1079 const struct compat_iovec __user *,vec,
1080 unsigned long, vlen, u32, pos_low, u32, pos_high)
1081 {
1082 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1083 return compat_sys_pwritev64(fd, vec, vlen, pos);
1084 }
1085 #endif
1086
1087 static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1088 size_t count, loff_t max)
1089 {
1090 struct fd in, out;
1091 struct inode *in_inode, *out_inode;
1092 loff_t pos;
1093 loff_t out_pos;
1094 ssize_t retval;
1095 int fl;
1096
1097 /*
1098 * Get input file, and verify that it is ok.
1099 */
1100 retval = -EBADF;
1101 in = fdget(in_fd);
1102 if (!in.file)
1103 goto out;
1104 if (!(in.file->f_mode & FMODE_READ))
1105 goto fput_in;
1106 retval = -ESPIPE;
1107 if (!ppos) {
1108 pos = in.file->f_pos;
1109 } else {
1110 pos = *ppos;
1111 if (!(in.file->f_mode & FMODE_PREAD))
1112 goto fput_in;
1113 }
1114 retval = rw_verify_area(READ, in.file, &pos, count);
1115 if (retval < 0)
1116 goto fput_in;
1117 count = retval;
1118
1119 /*
1120 * Get output file, and verify that it is ok.
1121 */
1122 retval = -EBADF;
1123 out = fdget(out_fd);
1124 if (!out.file)
1125 goto fput_in;
1126 if (!(out.file->f_mode & FMODE_WRITE))
1127 goto fput_out;
1128 retval = -EINVAL;
1129 in_inode = file_inode(in.file);
1130 out_inode = file_inode(out.file);
1131 out_pos = out.file->f_pos;
1132 retval = rw_verify_area(WRITE, out.file, &out_pos, count);
1133 if (retval < 0)
1134 goto fput_out;
1135 count = retval;
1136
1137 if (!max)
1138 max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
1139
1140 if (unlikely(pos + count > max)) {
1141 retval = -EOVERFLOW;
1142 if (pos >= max)
1143 goto fput_out;
1144 count = max - pos;
1145 }
1146
1147 fl = 0;
1148 #if 0
1149 /*
1150 * We need to debate whether we can enable this or not. The
1151 * man page documents EAGAIN return for the output at least,
1152 * and the application is arguably buggy if it doesn't expect
1153 * EAGAIN on a non-blocking file descriptor.
1154 */
1155 if (in.file->f_flags & O_NONBLOCK)
1156 fl = SPLICE_F_NONBLOCK;
1157 #endif
1158 file_start_write(out.file);
1159 retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
1160 file_end_write(out.file);
1161
1162 if (retval > 0) {
1163 add_rchar(current, retval);
1164 add_wchar(current, retval);
1165 fsnotify_access(in.file);
1166 fsnotify_modify(out.file);
1167 out.file->f_pos = out_pos;
1168 if (ppos)
1169 *ppos = pos;
1170 else
1171 in.file->f_pos = pos;
1172 }
1173
1174 inc_syscr(current);
1175 inc_syscw(current);
1176 if (pos > max)
1177 retval = -EOVERFLOW;
1178
1179 fput_out:
1180 fdput(out);
1181 fput_in:
1182 fdput(in);
1183 out:
1184 return retval;
1185 }
1186
1187 SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
1188 {
1189 loff_t pos;
1190 off_t off;
1191 ssize_t ret;
1192
1193 if (offset) {
1194 if (unlikely(get_user(off, offset)))
1195 return -EFAULT;
1196 pos = off;
1197 ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1198 if (unlikely(put_user(pos, offset)))
1199 return -EFAULT;
1200 return ret;
1201 }
1202
1203 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1204 }
1205
1206 SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
1207 {
1208 loff_t pos;
1209 ssize_t ret;
1210
1211 if (offset) {
1212 if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1213 return -EFAULT;
1214 ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1215 if (unlikely(put_user(pos, offset)))
1216 return -EFAULT;
1217 return ret;
1218 }
1219
1220 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1221 }
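
/*
 * Userspace view, for illustration only: sendfile(2) funnels into
 * do_sendfile() above, splicing data from in_fd to out_fd without a round
 * trip through a userspace buffer.  Error handling is kept minimal.
 */
#include <sys/sendfile.h>
#include <sys/stat.h>

static int copy_file_to(int out_fd, int in_fd)
{
	struct stat st;
	off_t off = 0;

	if (fstat(in_fd, &st) < 0)
		return -1;
	while (off < st.st_size) {
		ssize_t n = sendfile(out_fd, in_fd, &off, st.st_size - off);

		if (n <= 0)
			return -1;	/* error or unexpected end of input */
	}
	return 0;
}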
1222
1223 #ifdef CONFIG_COMPAT
1224 COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
1225 compat_off_t __user *, offset, compat_size_t, count)
1226 {
1227 loff_t pos;
1228 off_t off;
1229 ssize_t ret;
1230
1231 if (offset) {
1232 if (unlikely(get_user(off, offset)))
1233 return -EFAULT;
1234 pos = off;
1235 ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1236 if (unlikely(put_user(pos, offset)))
1237 return -EFAULT;
1238 return ret;
1239 }
1240
1241 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1242 }
1243
1244 COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
1245 compat_loff_t __user *, offset, compat_size_t, count)
1246 {
1247 loff_t pos;
1248 ssize_t ret;
1249
1250 if (offset) {
1251 if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1252 return -EFAULT;
1253 ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1254 if (unlikely(put_user(pos, offset)))
1255 return -EFAULT;
1256 return ret;
1257 }
1258
1259 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1260 }
1261 #endif