/*
 * fs/splice.c — captured from the git.proxmox.com
 * mirror_ubuntu-bionic-kernel.git gitweb view; the HTML page header has
 * been folded into this comment so the file remains valid C.
 */
1 /*
2 * "splice": joining two ropes together by interweaving their strands.
3 *
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
7 *
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
10 *
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
14 *
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
18 *
19 */
20 #include <linux/fs.h>
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/pipe_fs_i.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
30
/*
 * Passed to the actors: describes one in-flight splice transfer so the
 * per-buffer actor (pipe_to_file/pipe_to_sendpage) knows where to put
 * the data and how much remains.
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags (SPLICE_F_*) */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
40
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 on success (page pruned from the page cache, and the buffer
 * marked STOLEN|LRU), 1 if the page could not be stolen.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	/* caller must hand us a locked, uptodate page */
	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate wont wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/* drop any fs-private state (e.g. buffer heads) first */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	/*
	 * NOTE(review): the try_to_release_page() result is ignored here —
	 * presumably remove_mapping() below fails cleanly if private data
	 * remains; confirm against the vmscan implementation.
	 */
	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
74
75 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
76 struct pipe_buffer *buf)
77 {
78 page_cache_release(buf->page);
79 buf->page = NULL;
80 buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
81 }
82
83 static void *page_cache_pipe_buf_map(struct file *file,
84 struct pipe_inode_info *info,
85 struct pipe_buffer *buf)
86 {
87 struct page *page = buf->page;
88 int err;
89
90 if (!PageUptodate(page)) {
91 lock_page(page);
92
93 /*
94 * Page got truncated/unhashed. This will cause a 0-byte
95 * splice, if this is the first page.
96 */
97 if (!page->mapping) {
98 err = -ENODATA;
99 goto error;
100 }
101
102 /*
103 * Uh oh, read-error from disk.
104 */
105 if (!PageUptodate(page)) {
106 err = -EIO;
107 goto error;
108 }
109
110 /*
111 * Page is ok afterall, fall through to mapping.
112 */
113 unlock_page(page);
114 }
115
116 return kmap(page);
117 error:
118 unlock_page(page);
119 return ERR_PTR(err);
120 }
121
/*
 * Undo the kmap() done by page_cache_pipe_buf_map().
 */
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
127
/*
 * Buffer operations for pipe buffers whose pages come straight from the
 * page cache. can_merge is 0 — presumably so pipe writes never append
 * into these buffers; semantics live in the generic pipe code.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
135
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 *
 * Fills up to 'nr_pages' pages ('len' bytes, starting at 'offset' within
 * the first page) into 'pipe' as page_cache_pipe_buf buffers. Returns the
 * number of bytes queued, or a negative errno if nothing was queued.
 * Pages not consumed are released before returning.
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	/* anonymous (inode-less) pipes need no locking */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		/* no readers left: SIGPIPE, and -EPIPE if nothing was queued */
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			/* only the first page starts at 'offset'; see below */
			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs++;
			if (pipe->inode)
				do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;	/* subsequent pages are filled from 0 */
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			/* pipe now full: return the partial transfer */
			break;
		}

		/* pipe was full on entry to this iteration */
		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake readers before sleeping so they can drain the pipe */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	/* final reader wakeup, after dropping the pipe mutex */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	/* drop references on any pages we didn't manage to queue */
	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}
232
/*
 * Do one batch of splice-read: look up (and if necessary read in) up to
 * PIPE_BUFFERS page cache pages covering [in->f_pos, in->f_pos + len)
 * and hand them to move_to_pipe(). Returns bytes queued into the pipe,
 * 0 if no page could be produced, or a negative errno.
 */
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i, error;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. however, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	for (i = 0; i < nr_pages; i++, index++) {
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * If in nonblock mode then dont block on
			 * readpage (we've kicked readahead so there
			 * will be asynchronous progress):
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			/* freshly added page is locked: go start IO on it */
			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				/* AOP_TRUNCATED_PAGE: page vanished, retry lookup */
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}
		}
fill_it:
		pages[i] = page;
	}

	/* push whatever pages we gathered; error only matters if we got none */
	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return error;
}
341
342 /**
343 * generic_file_splice_read - splice data from file to a pipe
344 * @in: file to splice from
345 * @pipe: pipe to splice to
346 * @len: number of bytes to splice
347 * @flags: splice modifier flags
348 *
349 * Will read pages from given file and fill them into a pipe.
350 */
351 ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
352 size_t len, unsigned int flags)
353 {
354 ssize_t spliced;
355 int ret;
356
357 ret = 0;
358 spliced = 0;
359
360 while (len) {
361 ret = __generic_file_splice_read(in, pipe, len, flags);
362
363 if (ret <= 0)
364 break;
365
366 in->f_pos += ret;
367 len -= ret;
368 spliced += ret;
369
370 if (!(flags & SPLICE_F_NONBLOCK))
371 continue;
372 ret = -EAGAIN;
373 break;
374 }
375
376 if (spliced)
377 return spliced;
378
379 return ret;
380 }
381
382 EXPORT_SYMBOL(generic_file_splice_read);
383
384 /*
385 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
386 * using sendpage().
387 */
388 static int pipe_to_sendpage(struct pipe_inode_info *info,
389 struct pipe_buffer *buf, struct splice_desc *sd)
390 {
391 struct file *file = sd->file;
392 loff_t pos = sd->pos;
393 unsigned int offset;
394 ssize_t ret;
395 void *ptr;
396 int more;
397
398 /*
399 * Sub-optimal, but we are limited by the pipe ->map. We don't
400 * need a kmap'ed buffer here, we just want to make sure we
401 * have the page pinned if the pipe page originates from the
402 * page cache.
403 */
404 ptr = buf->ops->map(file, info, buf);
405 if (IS_ERR(ptr))
406 return PTR_ERR(ptr);
407
408 offset = pos & ~PAGE_CACHE_MASK;
409 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
410
411 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
412
413 buf->ops->unmap(info, buf);
414 if (ret == sd->len)
415 return 0;
416
417 return -EIO;
418 }
419
420 /*
421 * This is a little more tricky than the file -> pipe splicing. There are
422 * basically three cases:
423 *
424 * - Destination page already exists in the address space and there
425 * are users of it. For that case we have no other option that
426 * copying the data. Tough luck.
427 * - Destination page already exists in the address space, but there
428 * are no users of it. Make sure it's uptodate, then drop it. Fall
429 * through to last case.
430 * - Destination page does not exist, we can add the pipe page to
431 * the page cache and avoid the copy.
432 *
433 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
434 * sd->flags), we attempt to migrate pages from the pipe to the output
435 * file address space page cache. This is possible if no one else has
436 * the pipe page referenced outside of the pipe and page cache. If
437 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
438 * a new page in the output file page cache and fill/dirty that.
439 */
440 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
441 struct splice_desc *sd)
442 {
443 struct file *file = sd->file;
444 struct address_space *mapping = file->f_mapping;
445 gfp_t gfp_mask = mapping_gfp_mask(mapping);
446 unsigned int offset;
447 struct page *page;
448 pgoff_t index;
449 char *src;
450 int ret;
451
452 /*
453 * make sure the data in this buffer is uptodate
454 */
455 src = buf->ops->map(file, info, buf);
456 if (IS_ERR(src))
457 return PTR_ERR(src);
458
459 index = sd->pos >> PAGE_CACHE_SHIFT;
460 offset = sd->pos & ~PAGE_CACHE_MASK;
461
462 /*
463 * Reuse buf page, if SPLICE_F_MOVE is set.
464 */
465 if (sd->flags & SPLICE_F_MOVE) {
466 /*
467 * If steal succeeds, buf->page is now pruned from the vm
468 * side (LRU and page cache) and we can reuse it.
469 */
470 if (buf->ops->steal(info, buf))
471 goto find_page;
472
473 /*
474 * this will also set the page locked
475 */
476 page = buf->page;
477 if (add_to_page_cache(page, mapping, index, gfp_mask))
478 goto find_page;
479
480 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
481 lru_cache_add(page);
482 } else {
483 find_page:
484 ret = -ENOMEM;
485 page = find_or_create_page(mapping, index, gfp_mask);
486 if (!page)
487 goto out_nomem;
488
489 /*
490 * If the page is uptodate, it is also locked. If it isn't
491 * uptodate, we can mark it uptodate if we are filling the
492 * full page. Otherwise we need to read it in first...
493 */
494 if (!PageUptodate(page)) {
495 if (sd->len < PAGE_CACHE_SIZE) {
496 ret = mapping->a_ops->readpage(file, page);
497 if (unlikely(ret))
498 goto out;
499
500 lock_page(page);
501
502 if (!PageUptodate(page)) {
503 /*
504 * Page got invalidated, repeat.
505 */
506 if (!page->mapping) {
507 unlock_page(page);
508 page_cache_release(page);
509 goto find_page;
510 }
511 ret = -EIO;
512 goto out;
513 }
514 } else {
515 WARN_ON(!PageLocked(page));
516 SetPageUptodate(page);
517 }
518 }
519 }
520
521 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
522 if (ret == AOP_TRUNCATED_PAGE) {
523 page_cache_release(page);
524 goto find_page;
525 } else if (ret)
526 goto out;
527
528 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
529 char *dst = kmap_atomic(page, KM_USER0);
530
531 memcpy(dst + offset, src + buf->offset, sd->len);
532 flush_dcache_page(page);
533 kunmap_atomic(dst, KM_USER0);
534 }
535
536 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
537 if (ret == AOP_TRUNCATED_PAGE) {
538 page_cache_release(page);
539 goto find_page;
540 } else if (ret)
541 goto out;
542
543 mark_page_accessed(page);
544 balance_dirty_pages_ratelimited(mapping);
545 out:
546 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
547 page_cache_release(page);
548 unlock_page(page);
549 }
550 out_nomem:
551 buf->ops->unmap(info, buf);
552 return ret;
553 }
554
/*
 * An actor moves up to sd->len bytes from one pipe buffer to the
 * destination described by the splice_desc, returning 0 on success or a
 * negative errno (see pipe_to_file/pipe_to_sendpage above).
 */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
557
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Returns the number of bytes consumed from the pipe (out->f_pos is
 * advanced accordingly), or a negative errno if nothing was consumed.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	/* describe the overall transfer for the actor */
	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	/* anonymous (inode-less) pipes need no locking */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* never feed the actor more than was requested */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				/* -ENODATA (0-byte splice from a truncated
				 * page) is not reported as a hard error */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;

			/* buffer fully consumed: release it, advance curbuf */
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		/*
		 * Pipe drained and no writer is about to refill it: if we
		 * already have data, return it rather than sleeping.
		 */
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake sleeping writers before we block waiting for data */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	/* final writer wakeup, after dropping the pipe mutex */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	/* advance the output position by what the actors consumed */
	out->f_pos = sd.pos;
	return ret;

}
663
664 /**
665 * generic_file_splice_write - splice data from a pipe to a file
666 * @pipe: pipe info
667 * @out: file to write to
668 * @len: number of bytes to splice
669 * @flags: splice modifier flags
670 *
671 * Will either move or copy pages (determined by @flags options) from
672 * the given pipe inode to the given file.
673 *
674 */
675 ssize_t
676 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
677 size_t len, unsigned int flags)
678 {
679 struct address_space *mapping = out->f_mapping;
680 ssize_t ret;
681
682 ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);
683
684 /*
685 * If file or inode is SYNC and we actually wrote some data, sync it.
686 */
687 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
688 && ret > 0) {
689 struct inode *inode = mapping->host;
690 int err;
691
692 mutex_lock(&inode->i_mutex);
693 err = generic_osync_inode(mapping->host, mapping,
694 OSYNC_METADATA|OSYNC_DATA);
695 mutex_unlock(&inode->i_mutex);
696
697 if (err)
698 ret = err;
699 }
700
701 return ret;
702 }
703
704 EXPORT_SYMBOL(generic_file_splice_write);
705
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
724
725 /*
726 * Attempt to initiate a splice from pipe to file.
727 */
728 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
729 size_t len, unsigned int flags)
730 {
731 loff_t pos;
732 int ret;
733
734 if (unlikely(!out->f_op || !out->f_op->splice_write))
735 return -EINVAL;
736
737 if (unlikely(!(out->f_mode & FMODE_WRITE)))
738 return -EBADF;
739
740 pos = out->f_pos;
741
742 ret = rw_verify_area(WRITE, out, &pos, len);
743 if (unlikely(ret < 0))
744 return ret;
745
746 return out->f_op->splice_write(pipe, out, len, flags);
747 }
748
749 /*
750 * Attempt to initiate a splice from a file to a pipe.
751 */
752 static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
753 size_t len, unsigned int flags)
754 {
755 loff_t pos, isize, left;
756 int ret;
757
758 if (unlikely(!in->f_op || !in->f_op->splice_read))
759 return -EINVAL;
760
761 if (unlikely(!(in->f_mode & FMODE_READ)))
762 return -EBADF;
763
764 pos = in->f_pos;
765
766 ret = rw_verify_area(READ, in, &pos, len);
767 if (unlikely(ret < 0))
768 return ret;
769
770 isize = i_size_read(in->f_mapping->host);
771 if (unlikely(in->f_pos >= isize))
772 return 0;
773
774 left = isize - in->f_pos;
775 if (unlikely(left < len))
776 len = left;
777
778 return in->f_op->splice_read(in, pipe, len, flags);
779 }
780
/*
 * Splice 'len' bytes directly from file 'in' to file 'out', using a
 * per-task internal pipe as the intermediate buffer. Returns bytes
 * transferred, or a negative errno if nothing was transferred.
 */
long do_splice_direct(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		/* cache the pipe on the task so later calls reuse it */
		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	/* leave the (fully drained) internal pipe empty for reuse */
	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}

EXPORT_SYMBOL(do_splice_direct);
892
893 /*
894 * Determine where to splice to/from.
895 */
896 static long do_splice(struct file *in, loff_t __user *off_in,
897 struct file *out, loff_t __user *off_out,
898 size_t len, unsigned int flags)
899 {
900 struct pipe_inode_info *pipe;
901
902 pipe = in->f_dentry->d_inode->i_pipe;
903 if (pipe) {
904 if (off_in)
905 return -ESPIPE;
906 if (off_out) {
907 if (out->f_op->llseek == no_llseek)
908 return -EINVAL;
909 if (copy_from_user(&out->f_pos, off_out,
910 sizeof(loff_t)))
911 return -EFAULT;
912 }
913
914 return do_splice_from(pipe, out, len, flags);
915 }
916
917 pipe = out->f_dentry->d_inode->i_pipe;
918 if (pipe) {
919 if (off_out)
920 return -ESPIPE;
921 if (off_in) {
922 if (in->f_op->llseek == no_llseek)
923 return -EINVAL;
924 if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
925 return -EFAULT;
926 }
927
928 return do_splice_to(in, pipe, len, flags);
929 }
930
931 return -EINVAL;
932 }
933
934 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
935 int fd_out, loff_t __user *off_out,
936 size_t len, unsigned int flags)
937 {
938 long error;
939 struct file *in, *out;
940 int fput_in, fput_out;
941
942 if (unlikely(!len))
943 return 0;
944
945 error = -EBADF;
946 in = fget_light(fd_in, &fput_in);
947 if (in) {
948 if (in->f_mode & FMODE_READ) {
949 out = fget_light(fd_out, &fput_out);
950 if (out) {
951 if (out->f_mode & FMODE_WRITE)
952 error = do_splice(in, off_in,
953 out, off_out,
954 len, flags);
955 fput_light(out, fput_out);
956 }
957 }
958
959 fput_light(in, fput_in);
960 }
961
962 return error;
963 }