]>
Commit | Line | Data |
---|---|---|
1 | // SPDX-License-Identifier: GPL-2.0-only | |
2 | /* | |
3 | * "splice": joining two ropes together by interweaving their strands. | |
4 | * | |
5 | * This is the "extended pipe" functionality, where a pipe is used as | |
6 | * an arbitrary in-memory buffer. Think of a pipe as a small kernel | |
7 | * buffer that you can use to transfer data from one end to the other. | |
8 | * | |
9 | * The traditional unix read/write is extended with a "splice()" operation | |
10 | * that transfers data buffers to or from a pipe buffer. | |
11 | * | |
12 | * Named by Larry McVoy, original implementation from Linus, extended by | |
13 | * Jens to support splicing to files, network, direct splicing, etc and | |
14 | * fixing lots of bugs. | |
15 | * | |
16 | * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk> | |
17 | * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org> | |
18 | * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu> | |
19 | * | |
20 | */ | |
21 | #include <linux/bvec.h> | |
22 | #include <linux/fs.h> | |
23 | #include <linux/file.h> | |
24 | #include <linux/pagemap.h> | |
25 | #include <linux/splice.h> | |
26 | #include <linux/memcontrol.h> | |
27 | #include <linux/mm_inline.h> | |
28 | #include <linux/swap.h> | |
29 | #include <linux/writeback.h> | |
30 | #include <linux/export.h> | |
31 | #include <linux/syscalls.h> | |
32 | #include <linux/uio.h> | |
33 | #include <linux/security.h> | |
34 | #include <linux/gfp.h> | |
35 | #include <linux/socket.h> | |
36 | #include <linux/sched/signal.h> | |
37 | ||
38 | #include "internal.h" | |
39 | ||
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * On success the page has been removed from the page cache and is handed
 * to the caller still locked (we return without unlock_page()).  On
 * failure the page is unlocked and remains in the page cache.
 */
static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	/* NULL mapping means the page was truncated while we waited for the lock */
	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache.  Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		/* Private (fs) data that can't be dropped pins the page */
		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return true;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return false;
}
90 | ||
/*
 * Release a page-cache backed pipe buffer: drop our page reference and
 * clear the LRU hint so a recycled pipe_buffer does not inherit it.
 */
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	put_page(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
97 | ||
/*
 * Check whether the contents of buf is OK to access. Since the content
 * is a page cache page, IO may be in flight.
 *
 * Returns 0 if the page is (now) uptodate, -ENODATA if it was truncated
 * out of the mapping while we slept, or -EIO on a read error.  The page
 * is unlocked again on every path.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		/* Lock to serialize against the IO completion / truncate */
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok afterall, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}
139 | ||
/*
 * Operations for pipe buffers whose pages come straight from the page
 * cache (splice from a file): confirm waits for in-flight reads, and
 * try_steal may pull the page out of the page cache entirely.
 */
const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.confirm	= page_cache_pipe_buf_confirm,
	.release	= page_cache_pipe_buf_release,
	.try_steal	= page_cache_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};
146 | ||
147 | static bool user_page_pipe_buf_try_steal(struct pipe_inode_info *pipe, | |
148 | struct pipe_buffer *buf) | |
149 | { | |
150 | if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) | |
151 | return false; | |
152 | ||
153 | buf->flags |= PIPE_BUF_FLAG_LRU; | |
154 | return generic_pipe_buf_try_steal(pipe, buf); | |
155 | } | |
156 | ||
/*
 * Operations for buffers created by vmsplice from user memory; no
 * ->confirm needed since no page-cache IO can be in flight.
 */
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.release	= page_cache_pipe_buf_release,
	.try_steal	= user_page_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};
162 | ||
/*
 * Wake any readers blocked on the pipe.  The smp_mb() orders our updates
 * of the pipe state against the waitqueue_active() check, so we cannot
 * miss a reader that is just going to sleep.
 */
static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->rd_wait))
		wake_up_interruptible(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
170 | ||
/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe:	pipe to fill
 * @spd:	data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 *    Returns the number of bytes linked into the pipe, -EPIPE if there
 *    are no readers, or -EAGAIN if the pipe was already full.  Pages
 *    that did not fit are released through @spd->spd_release().
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	unsigned int tail = pipe->tail;
	unsigned int head = pipe->head;
	unsigned int mask = pipe->ring_size - 1;
	int ret = 0, page_nr = 0;

	if (!spd_pages)
		return 0;

	if (unlikely(!pipe->readers)) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* Fill ring slots until the pipe is full or we run out of pages */
	while (!pipe_full(head, tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[head & mask];

		buf->page = spd->pages[page_nr];
		buf->offset = spd->partial[page_nr].offset;
		buf->len = spd->partial[page_nr].len;
		buf->private = spd->partial[page_nr].private;
		buf->ops = spd->ops;
		buf->flags = 0;

		/* publish the slot before moving on to the next one */
		head++;
		pipe->head = head;
		page_nr++;
		ret += buf->len;

		if (!--spd->nr_pages)
			break;
	}

	if (!ret)
		ret = -EAGAIN;

out:
	/* release whatever we could not link into the pipe */
	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}
EXPORT_SYMBOL_GPL(splice_to_pipe);
229 | ||
/*
 * Append a single pre-filled pipe_buffer to the pipe ring.  On success
 * the pipe takes ownership of @buf's contents and buf->len is returned;
 * otherwise the buffer is released here and -EPIPE (no readers) or
 * -EAGAIN (pipe full) is returned.
 */
ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	unsigned int head = pipe->head;
	unsigned int tail = pipe->tail;
	unsigned int mask = pipe->ring_size - 1;
	int ret;

	if (unlikely(!pipe->readers)) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
	} else if (pipe_full(head, tail, pipe->max_usage)) {
		ret = -EAGAIN;
	} else {
		pipe->bufs[head & mask] = *buf;
		pipe->head = head + 1;
		return buf->len;
	}
	/* failed to queue it: drop the buffer's reference */
	pipe_buf_release(pipe, buf);
	return ret;
}
EXPORT_SYMBOL(add_to_pipe);
251 | ||
252 | /* | |
253 | * Check if we need to grow the arrays holding pages and partial page | |
254 | * descriptions. | |
255 | */ | |
256 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) | |
257 | { | |
258 | unsigned int max_usage = READ_ONCE(pipe->max_usage); | |
259 | ||
260 | spd->nr_pages_max = max_usage; | |
261 | if (max_usage <= PIPE_DEF_BUFFERS) | |
262 | return 0; | |
263 | ||
264 | spd->pages = kmalloc_array(max_usage, sizeof(struct page *), GFP_KERNEL); | |
265 | spd->partial = kmalloc_array(max_usage, sizeof(struct partial_page), | |
266 | GFP_KERNEL); | |
267 | ||
268 | if (spd->pages && spd->partial) | |
269 | return 0; | |
270 | ||
271 | kfree(spd->pages); | |
272 | kfree(spd->partial); | |
273 | return -ENOMEM; | |
274 | } | |
275 | ||
276 | void splice_shrink_spd(struct splice_pipe_desc *spd) | |
277 | { | |
278 | if (spd->nr_pages_max <= PIPE_DEF_BUFFERS) | |
279 | return; | |
280 | ||
281 | kfree(spd->pages); | |
282 | kfree(spd->partial); | |
283 | } | |
284 | ||
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as it has more or less sane ->read_iter().
 *
 *    Returns the number of bytes spliced, or a negative error.  -EFAULT
 *    from ->read_iter() (pipe full) is translated to -EAGAIN.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	struct iov_iter to;
	struct kiocb kiocb;
	unsigned int i_head;
	int ret;

	iov_iter_pipe(&to, READ, pipe, len);
	/* remember where the iterator started, in case we must roll back */
	i_head = to.head;
	init_sync_kiocb(&kiocb, in);
	kiocb.ki_pos = *ppos;
	ret = call_read_iter(in, &kiocb, &to);
	if (ret > 0) {
		*ppos = kiocb.ki_pos;
		file_accessed(in);
	} else if (ret < 0) {
		/* rewind the iterator and drop anything partially emitted */
		to.head = i_head;
		to.iov_offset = 0;
		iov_iter_advance(&to, 0); /* to free what was emitted */
		/*
		 * callers of ->splice_read() expect -EAGAIN on
		 * "can't put anything in there", rather than -EFAULT.
		 */
		if (ret == -EFAULT)
			ret = -EAGAIN;
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
330 | ||
/*
 * Default operations for anonymous pipe buffers: no ->confirm needed,
 * pages may be stolen via the generic helper.
 */
const struct pipe_buf_operations default_pipe_buf_ops = {
	.release	= generic_pipe_buf_release,
	.try_steal	= generic_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};
336 | ||
/*
 * Pipe buffer operations for a socket and similar.  Deliberately has no
 * ->try_steal: the page must not be removed from under the owner, so
 * consumers always copy instead of stealing.
 */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
	.release	= generic_pipe_buf_release,
	.get		= generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);
343 | ||
344 | /* | |
345 | * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' | |
346 | * using sendpage(). Return the number of bytes sent. | |
347 | */ | |
348 | static int pipe_to_sendpage(struct pipe_inode_info *pipe, | |
349 | struct pipe_buffer *buf, struct splice_desc *sd) | |
350 | { | |
351 | struct file *file = sd->u.file; | |
352 | loff_t pos = sd->pos; | |
353 | int more; | |
354 | ||
355 | if (!likely(file->f_op->sendpage)) | |
356 | return -EINVAL; | |
357 | ||
358 | more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0; | |
359 | ||
360 | if (sd->len < sd->total_len && | |
361 | pipe_occupancy(pipe->head, pipe->tail) > 1) | |
362 | more |= MSG_SENDPAGE_NOTLAST; | |
363 | ||
364 | return file->f_op->sendpage(file, buf->page, buf->offset, | |
365 | sd->len, &pos, more); | |
366 | } | |
367 | ||
/*
 * Wake any writers blocked on the pipe.  The smp_mb() orders our updates
 * of the pipe state against the waitqueue_active() check, so we cannot
 * miss a writer that is just going to sleep.
 */
static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wr_wait))
		wake_up_interruptible(&pipe->wr_wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
375 | ||
/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination.  It returns when there's no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied.  It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
			  splice_actor *actor)
{
	unsigned int head = pipe->head;
	unsigned int tail = pipe->tail;
	unsigned int mask = pipe->ring_size - 1;
	int ret;

	while (!pipe_empty(head, tail)) {
		struct pipe_buffer *buf = &pipe->bufs[tail & mask];

		/* never hand the actor more than the caller asked for */
		sd->len = buf->len;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;

		/* -ENODATA (truncated page) just ends the splice quietly */
		ret = pipe_buf_confirm(pipe, buf);
		if (unlikely(ret)) {
			if (ret == -ENODATA)
				ret = 0;
			return ret;
		}

		ret = actor(pipe, buf, sd);
		if (ret <= 0)
			return ret;

		/* account for a possibly partial consumption by the actor */
		buf->offset += ret;
		buf->len -= ret;

		sd->num_spliced += ret;
		sd->len -= ret;
		sd->pos += ret;
		sd->total_len -= ret;

		if (!buf->len) {
			/* buffer fully consumed: release it and advance tail */
			pipe_buf_release(pipe, buf);
			tail++;
			pipe->tail = tail;
			if (pipe->files)
				sd->need_wakeup = true;
		}

		if (!sd->total_len)
			return 0;
	}

	return 1;
}
444 | ||
445 | /* We know we have a pipe buffer, but maybe it's empty? */ | |
446 | static inline bool eat_empty_buffer(struct pipe_inode_info *pipe) | |
447 | { | |
448 | unsigned int tail = pipe->tail; | |
449 | unsigned int mask = pipe->ring_size - 1; | |
450 | struct pipe_buffer *buf = &pipe->bufs[tail & mask]; | |
451 | ||
452 | if (unlikely(!buf->len)) { | |
453 | pipe_buf_release(pipe, buf); | |
454 | pipe->tail = tail+1; | |
455 | return true; | |
456 | } | |
457 | ||
458 | return false; | |
459 | } | |
460 | ||
/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available.  It will return zero
 *    or -errno if no more data needs to be spliced.
 */
static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	/*
	 * Check for signal early to make process killable when there are
	 * always buffers available
	 */
	if (signal_pending(current))
		return -ERESTARTSYS;

repeat:
	while (pipe_empty(pipe->head, pipe->tail)) {
		/* no writers left: nothing more will ever arrive */
		if (!pipe->writers)
			return 0;

		/* we already moved something; let the caller finish with that */
		if (sd->num_spliced)
			return 0;

		if (sd->flags & SPLICE_F_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		/* wake writers we owe a wakeup before going to sleep ourselves */
		if (sd->need_wakeup) {
			wakeup_pipe_writers(pipe);
			sd->need_wakeup = false;
		}

		pipe_wait_readable(pipe);
	}

	/* skip zero-length buffers and re-check emptiness */
	if (eat_empty_buffer(pipe))
		goto repeat;

	return 1;
}
507 | ||
/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
static void splice_from_pipe_begin(struct splice_desc *sd)
{
	/* nothing moved yet, no deferred writer wakeup pending */
	sd->num_spliced = 0;
	sd->need_wakeup = false;
}
522 | ||
/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary.  It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
static void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	if (sd->need_wakeup)
		wakeup_pipe_writers(pipe);
}
538 | ||
539 | /** | |
540 | * __splice_from_pipe - splice data from a pipe to given actor | |
541 | * @pipe: pipe to splice from | |
542 | * @sd: information to @actor | |
543 | * @actor: handler that splices the data | |
544 | * | |
545 | * Description: | |
546 | * This function does little more than loop over the pipe and call | |
547 | * @actor to do the actual moving of a single struct pipe_buffer to | |
548 | * the desired destination. See pipe_to_file, pipe_to_sendpage, or | |
549 | * pipe_to_user. | |
550 | * | |
551 | */ | |
552 | ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, | |
553 | splice_actor *actor) | |
554 | { | |
555 | int ret; | |
556 | ||
557 | splice_from_pipe_begin(sd); | |
558 | do { | |
559 | cond_resched(); | |
560 | ret = splice_from_pipe_next(pipe, sd); | |
561 | if (ret > 0) | |
562 | ret = splice_from_pipe_feed(pipe, sd, actor); | |
563 | } while (ret > 0); | |
564 | splice_from_pipe_end(pipe, sd); | |
565 | ||
566 | return sd->num_spliced ? sd->num_spliced : ret; | |
567 | } | |
568 | EXPORT_SYMBOL(__splice_from_pipe); | |
569 | ||
570 | /** | |
571 | * splice_from_pipe - splice data from a pipe to a file | |
572 | * @pipe: pipe to splice from | |
573 | * @out: file to splice to | |
574 | * @ppos: position in @out | |
575 | * @len: how many bytes to splice | |
576 | * @flags: splice modifier flags | |
577 | * @actor: handler that splices the data | |
578 | * | |
579 | * Description: | |
580 | * See __splice_from_pipe. This function locks the pipe inode, | |
581 | * otherwise it's identical to __splice_from_pipe(). | |
582 | * | |
583 | */ | |
584 | ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, | |
585 | loff_t *ppos, size_t len, unsigned int flags, | |
586 | splice_actor *actor) | |
587 | { | |
588 | ssize_t ret; | |
589 | struct splice_desc sd = { | |
590 | .total_len = len, | |
591 | .flags = flags, | |
592 | .pos = *ppos, | |
593 | .u.file = out, | |
594 | }; | |
595 | ||
596 | pipe_lock(pipe); | |
597 | ret = __splice_from_pipe(pipe, &sd, actor); | |
598 | pipe_unlock(pipe); | |
599 | ||
600 | return ret; | |
601 | } | |
602 | ||
/**
 * iter_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *    This one is ->write_iter-based.
 *
 *    Returns the number of bytes spliced, or a negative error if
 *    nothing was written at all.
 */
ssize_t
iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	int nbufs = pipe->max_usage;
	/* one bio_vec per pipe slot, batched into a single ->write_iter */
	struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
					GFP_KERNEL);
	ssize_t ret;

	if (unlikely(!array))
		return -ENOMEM;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	while (sd.total_len) {
		struct iov_iter from;
		unsigned int head, tail, mask;
		size_t left;
		int n;

		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		/* the pipe may have been resized while we slept: grow array */
		if (unlikely(nbufs < pipe->max_usage)) {
			kfree(array);
			nbufs = pipe->max_usage;
			array = kcalloc(nbufs, sizeof(struct bio_vec),
					GFP_KERNEL);
			if (!array) {
				ret = -ENOMEM;
				break;
			}
		}

		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		/* build the vector */
		left = sd.total_len;
		for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t this_len = buf->len;

			/* zero-length bvecs are not supported, skip them */
			if (!this_len)
				continue;
			this_len = min(this_len, left);

			ret = pipe_buf_confirm(pipe, buf);
			if (unlikely(ret)) {
				if (ret == -ENODATA)
					ret = 0;
				goto done;
			}

			array[n].bv_page = buf->page;
			array[n].bv_len = this_len;
			array[n].bv_offset = buf->offset;
			left -= this_len;
			n++;
		}

		iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
		ret = vfs_iter_write(out, &from, &sd.pos, 0);
		if (ret <= 0)
			break;

		sd.num_spliced += ret;
		sd.total_len -= ret;
		*ppos = sd.pos;

		/* dismiss the fully eaten buffers, adjust the partial one */
		tail = pipe->tail;
		while (ret) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			if (ret >= buf->len) {
				ret -= buf->len;
				buf->len = 0;
				pipe_buf_release(pipe, buf);
				tail++;
				pipe->tail = tail;
				if (pipe->files)
					sd.need_wakeup = true;
			} else {
				buf->offset += ret;
				buf->len -= ret;
				ret = 0;
			}
		}
	}
done:
	kfree(array);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	return ret;
}

EXPORT_SYMBOL(iter_file_splice_write);
729 | ||
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	/* thin wrapper: the per-buffer work is done by pipe_to_sendpage() */
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
750 | ||
/*
 * Log (rate-limited, debug level) that @file's f_op lacks the splice
 * @op hook, and return the -EINVAL that the caller passes on.
 */
static int warn_unsupported(struct file *file, const char *op)
{
	pr_debug_ratelimited(
		"splice %s not supported for file %pD4 (pid: %d comm: %.20s)\n",
		op, file, current->pid, current->comm);
	return -EINVAL;
}
758 | ||
/*
 * Attempt to initiate a splice from pipe to file.  Dispatches to the
 * file's ->splice_write(); -EINVAL if the file has none.
 */
long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
		    loff_t *ppos, size_t len, unsigned int flags)
{
	if (unlikely(!out->f_op->splice_write))
		return warn_unsupported(out, "write");
	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}
EXPORT_SYMBOL_GPL(do_splice_from);
770 | ||
/*
 * Attempt to initiate a splice from a file to a pipe.  Clamps @len to
 * the pipe's free space and MAX_RW_COUNT, then dispatches to the file's
 * ->splice_read().
 */
long do_splice_to(struct file *in, loff_t *ppos,
		  struct pipe_inode_info *pipe, size_t len,
		  unsigned int flags)
{
	unsigned int p_space;
	int ret;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	/* Don't try to read more than the pipe has space for. */
	p_space = pipe->max_usage - pipe_occupancy(pipe->head, pipe->tail);
	len = min_t(size_t, len, p_space << PAGE_SHIFT);

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(len > MAX_RW_COUNT))
		len = MAX_RW_COUNT;

	if (unlikely(!in->f_op->splice_read))
		return warn_unsupported(in, "read");
	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
EXPORT_SYMBOL_GPL(do_splice_to);
800 | ||
/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in:		file to splice from
 * @sd:		actor information on where to splice to
 * @actor:	handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 *    Returns the number of bytes moved to the destination, or an error
 *    if nothing was moved at all.
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags, more;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = file_inode(in)->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		/* cache the pipe on the task for reuse by later calls */
		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;
	more = sd->flags & SPLICE_F_MORE;

	WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail));

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		/* fill the internal pipe from the input file ... */
		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * If more data is pending, set SPLICE_F_MORE
		 * If this is the last data and SPLICE_F_MORE was not set
		 * initially, clears it.
		 */
		if (read_len < len)
			sd->flags |= SPLICE_F_MORE;
		else if (!more)
			sd->flags &= ~SPLICE_F_MORE;
		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		/* short write: keep the input position in sync with output */
		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->tail = pipe->head = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = &pipe->bufs[i];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}

	/* report the error only when no bytes were moved at all */
	if (!bytes)
		bytes = ret;

	goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);
932 | ||
/*
 * Actor for do_splice_direct(): drain the internal pipe into the output
 * file at the caller-supplied offset sd->opos.
 */
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, sd->opos, sd->total_len,
			      sd->flags);
}
941 | ||
/**
 * do_splice_direct - splices data directly between two files
 * @in:		file to splice from
 * @ppos:	input file offset
 * @out:	file to splice to
 * @opos:	output file offset
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 *    Returns the number of bytes spliced (updating *@ppos), or a
 *    negative error: -EBADF if @out is not writable, -EINVAL for
 *    O_APPEND output, or whatever rw_verify_area()/the transfer fails
 *    with.
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      loff_t *opos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
		.opos		= opos,
	};
	long ret;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	/* splicing to an append-only file makes no sense */
	if (unlikely(out->f_flags & O_APPEND))
		return -EINVAL;

	ret = rw_verify_area(WRITE, out, opos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}
EXPORT_SYMBOL(do_splice_direct);
988 | ||
989 | static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags) | |
990 | { | |
991 | for (;;) { | |
992 | if (unlikely(!pipe->readers)) { | |
993 | send_sig(SIGPIPE, current, 0); | |
994 | return -EPIPE; | |
995 | } | |
996 | if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) | |
997 | return 0; | |
998 | if (flags & SPLICE_F_NONBLOCK) | |
999 | return -EAGAIN; | |
1000 | if (signal_pending(current)) | |
1001 | return -ERESTARTSYS; | |
1002 | pipe_wait_writable(pipe); | |
1003 | } | |
1004 | } | |
1005 | ||
1006 | static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, | |
1007 | struct pipe_inode_info *opipe, | |
1008 | size_t len, unsigned int flags); | |
1009 | ||
1010 | long splice_file_to_pipe(struct file *in, | |
1011 | struct pipe_inode_info *opipe, | |
1012 | loff_t *offset, | |
1013 | size_t len, unsigned int flags) | |
1014 | { | |
1015 | long ret; | |
1016 | ||
1017 | pipe_lock(opipe); | |
1018 | ret = wait_for_space(opipe, flags); | |
1019 | if (!ret) | |
1020 | ret = do_splice_to(in, offset, opipe, len, flags); | |
1021 | pipe_unlock(opipe); | |
1022 | if (ret > 0) | |
1023 | wakeup_pipe_readers(opipe); | |
1024 | return ret; | |
1025 | } | |
1026 | ||
/*
 * Determine where to splice to/from and dispatch accordingly:
 * pipe->pipe, pipe->file or file->pipe.  Explicit offsets are only
 * valid for the non-pipe end; passing one for a pipe fd yields -ESPIPE.
 */
long do_splice(struct file *in, loff_t *off_in, struct file *out,
	       loff_t *off_out, size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset;
	long ret;

	/* Need a readable source and a writable destination. */
	if (unlikely(!(in->f_mode & FMODE_READ) ||
		     !(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ipipe = get_pipe_info(in, true);
	opipe = get_pipe_info(out, true);

	if (ipipe && opipe) {
		/* Offsets make no sense when both ends are pipes. */
		if (off_in || off_out)
			return -ESPIPE;

		/* Splicing to self would be fun, but... */
		if (ipipe == opipe)
			return -EINVAL;

		if ((in->f_flags | out->f_flags) & O_NONBLOCK)
			flags |= SPLICE_F_NONBLOCK;

		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
	}

	if (ipipe) {
		/* pipe -> file: an offset (if given) applies to the output. */
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			/* pwrite()-style offset requires FMODE_PWRITE. */
			if (!(out->f_mode & FMODE_PWRITE))
				return -EINVAL;
			offset = *off_out;
		} else {
			offset = out->f_pos;
		}

		/* Splicing would bypass O_APPEND semantics; refuse. */
		if (unlikely(out->f_flags & O_APPEND))
			return -EINVAL;

		ret = rw_verify_area(WRITE, out, &offset, len);
		if (unlikely(ret < 0))
			return ret;

		if (in->f_flags & O_NONBLOCK)
			flags |= SPLICE_F_NONBLOCK;

		file_start_write(out);
		ret = do_splice_from(ipipe, out, &offset, len, flags);
		file_end_write(out);

		/* Publish the advanced offset to f_pos or *off_out. */
		if (!off_out)
			out->f_pos = offset;
		else
			*off_out = offset;

		return ret;
	}

	if (opipe) {
		/* file -> pipe: an offset (if given) applies to the input. */
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			/* pread()-style offset requires FMODE_PREAD. */
			if (!(in->f_mode & FMODE_PREAD))
				return -EINVAL;
			offset = *off_in;
		} else {
			offset = in->f_pos;
		}

		if (out->f_flags & O_NONBLOCK)
			flags |= SPLICE_F_NONBLOCK;

		ret = splice_file_to_pipe(in, opipe, &offset, len, flags);

		/* Publish the advanced offset to f_pos or *off_in. */
		if (!off_in)
			in->f_pos = offset;
		else
			*off_in = offset;

		return ret;
	}

	/* Neither end is a pipe: splice() does not apply. */
	return -EINVAL;
}
1117 | ||
/*
 * Wrapper around do_splice() that handles the user-space offset
 * pointers: copy them in before, and back out after, the operation.
 *
 * NOTE: __off_in and __off_out both alias the single local 'offset'.
 * That is safe because do_splice() only ever consumes one of them:
 * a pipe fd with a non-NULL offset is rejected up front with -ESPIPE,
 * and when neither fd is a pipe it returns -EINVAL.
 */
static long __do_splice(struct file *in, loff_t __user *off_in,
			struct file *out, loff_t __user *off_out,
			size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset, *__off_in = NULL, *__off_out = NULL;
	long ret;

	ipipe = get_pipe_info(in, true);
	opipe = get_pipe_info(out, true);

	/* Offsets may not be given for a pipe end. */
	if (ipipe && off_in)
		return -ESPIPE;
	if (opipe && off_out)
		return -ESPIPE;

	if (off_out) {
		if (copy_from_user(&offset, off_out, sizeof(loff_t)))
			return -EFAULT;
		__off_out = &offset;
	}
	if (off_in) {
		if (copy_from_user(&offset, off_in, sizeof(loff_t)))
			return -EFAULT;
		__off_in = &offset;
	}

	ret = do_splice(in, __off_in, out, __off_out, len, flags);
	if (ret < 0)
		return ret;

	/* Copy the (possibly advanced) offsets back to user space. */
	if (__off_out && copy_to_user(off_out, __off_out, sizeof(loff_t)))
		return -EFAULT;
	if (__off_in && copy_to_user(off_in, __off_in, sizeof(loff_t)))
		return -EFAULT;

	return ret;
}
1157 | ||
/*
 * Fill a pipe with the user pages backing @from.  Pins up to 16 pages
 * at a time via iov_iter_get_pages() and hands each one to the pipe as
 * a user_page_pipe_buf.  Page references for buffers that were added
 * are owned by the pipe from then on; references to pages that could
 * not be added (after a failure) are dropped here.
 *
 * Returns the number of bytes added to the pipe, or, if nothing was
 * added, the first error encountered.
 */
static int iter_to_pipe(struct iov_iter *from,
			struct pipe_inode_info *pipe,
			unsigned flags)
{
	struct pipe_buffer buf = {
		.ops = &user_page_pipe_buf_ops,
		.flags = flags
	};
	size_t total = 0;
	int ret = 0;
	bool failed = false;

	while (iov_iter_count(from) && !failed) {
		struct page *pages[16];
		ssize_t copied;
		size_t start;
		int n;

		copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
		if (copied <= 0) {
			ret = copied;
			break;
		}

		/* Only the first page may start at a non-zero offset. */
		for (n = 0; copied; n++, start = 0) {
			int size = min_t(int, copied, PAGE_SIZE - start);
			if (!failed) {
				buf.page = pages[n];
				buf.offset = start;
				buf.len = size;
				ret = add_to_pipe(pipe, &buf);
				if (unlikely(ret < 0)) {
					/* Keep looping to release the
					 * remaining pinned pages. */
					failed = true;
				} else {
					iov_iter_advance(from, ret);
					total += ret;
				}
			} else {
				/* Pinned, but never made it into the pipe. */
				put_page(pages[n]);
			}
			copied -= size;
		}
	}
	/* Partial progress wins over a trailing error. */
	return total ? total : ret;
}
1203 | ||
1204 | static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |
1205 | struct splice_desc *sd) | |
1206 | { | |
1207 | int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data); | |
1208 | return n == sd->len ? n : -EFAULT; | |
1209 | } | |
1210 | ||
1211 | /* | |
1212 | * For lack of a better implementation, implement vmsplice() to userspace | |
1213 | * as a simple copy of the pipes pages to the user iov. | |
1214 | */ | |
1215 | static long vmsplice_to_user(struct file *file, struct iov_iter *iter, | |
1216 | unsigned int flags) | |
1217 | { | |
1218 | struct pipe_inode_info *pipe = get_pipe_info(file, true); | |
1219 | struct splice_desc sd = { | |
1220 | .total_len = iov_iter_count(iter), | |
1221 | .flags = flags, | |
1222 | .u.data = iter | |
1223 | }; | |
1224 | long ret = 0; | |
1225 | ||
1226 | if (!pipe) | |
1227 | return -EBADF; | |
1228 | ||
1229 | if (sd.total_len) { | |
1230 | pipe_lock(pipe); | |
1231 | ret = __splice_from_pipe(pipe, &sd, pipe_to_user); | |
1232 | pipe_unlock(pipe); | |
1233 | } | |
1234 | ||
1235 | return ret; | |
1236 | } | |
1237 | ||
1238 | /* | |
1239 | * vmsplice splices a user address range into a pipe. It can be thought of | |
1240 | * as splice-from-memory, where the regular splice is splice-from-file (or | |
1241 | * to file). In both cases the output is a pipe, naturally. | |
1242 | */ | |
1243 | static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter, | |
1244 | unsigned int flags) | |
1245 | { | |
1246 | struct pipe_inode_info *pipe; | |
1247 | long ret = 0; | |
1248 | unsigned buf_flag = 0; | |
1249 | ||
1250 | if (flags & SPLICE_F_GIFT) | |
1251 | buf_flag = PIPE_BUF_FLAG_GIFT; | |
1252 | ||
1253 | pipe = get_pipe_info(file, true); | |
1254 | if (!pipe) | |
1255 | return -EBADF; | |
1256 | ||
1257 | pipe_lock(pipe); | |
1258 | ret = wait_for_space(pipe, flags); | |
1259 | if (!ret) | |
1260 | ret = iter_to_pipe(iter, pipe, buf_flag); | |
1261 | pipe_unlock(pipe); | |
1262 | if (ret > 0) | |
1263 | wakeup_pipe_readers(pipe); | |
1264 | return ret; | |
1265 | } | |
1266 | ||
1267 | static int vmsplice_type(struct fd f, int *type) | |
1268 | { | |
1269 | if (!f.file) | |
1270 | return -EBADF; | |
1271 | if (f.file->f_mode & FMODE_WRITE) { | |
1272 | *type = WRITE; | |
1273 | } else if (f.file->f_mode & FMODE_READ) { | |
1274 | *type = READ; | |
1275 | } else { | |
1276 | fdput(f); | |
1277 | return -EBADF; | |
1278 | } | |
1279 | return 0; | |
1280 | } | |
1281 | ||
1282 | /* | |
1283 | * Note that vmsplice only really supports true splicing _from_ user memory | |
1284 | * to a pipe, not the other way around. Splicing from user memory is a simple | |
1285 | * operation that can be supported without any funky alignment restrictions | |
1286 | * or nasty vm tricks. We simply map in the user memory and fill them into | |
1287 | * a pipe. The reverse isn't quite as easy, though. There are two possible | |
1288 | * solutions for that: | |
1289 | * | |
1290 | * - memcpy() the data internally, at which point we might as well just | |
1291 | * do a regular read() on the buffer anyway. | |
1292 | * - Lots of nasty vm tricks, that are neither fast nor flexible (it | |
1293 | * has restriction limitations on both ends of the pipe). | |
1294 | * | |
1295 | * Currently we punt and implement it as a normal copy, see pipe_to_user(). | |
1296 | * | |
1297 | */ | |
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov,
		unsigned long, nr_segs, unsigned int, flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t error;
	struct fd f;
	int type;

	if (unlikely(flags & ~SPLICE_F_ALL))
		return -EINVAL;

	f = fdget(fd);
	/* Direction follows the fd's open mode: writable => into pipe. */
	error = vmsplice_type(f, &type);
	if (error)
		return error;	/* vmsplice_type() dropped f as needed */

	error = import_iovec(type, uiov, nr_segs,
			     ARRAY_SIZE(iovstack), &iov, &iter);
	if (error < 0)
		goto out_fdput;	/* nothing to kfree() on import failure */

	if (!iov_iter_count(&iter))
		error = 0;
	else if (iov_iter_rw(&iter) == WRITE)
		error = vmsplice_to_pipe(f.file, &iter, flags);
	else
		error = vmsplice_to_user(f.file, &iter, flags);

	/*
	 * NOTE(review): relies on import_iovec() leaving iov NULL (or a
	 * heap pointer) when iovstack sufficed, so kfree() is safe here.
	 */
	kfree(iov);
out_fdput:
	fdput(f);
	return error;
}
1333 | ||
1334 | SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in, | |
1335 | int, fd_out, loff_t __user *, off_out, | |
1336 | size_t, len, unsigned int, flags) | |
1337 | { | |
1338 | struct fd in, out; | |
1339 | long error; | |
1340 | ||
1341 | if (unlikely(!len)) | |
1342 | return 0; | |
1343 | ||
1344 | if (unlikely(flags & ~SPLICE_F_ALL)) | |
1345 | return -EINVAL; | |
1346 | ||
1347 | error = -EBADF; | |
1348 | in = fdget(fd_in); | |
1349 | if (in.file) { | |
1350 | out = fdget(fd_out); | |
1351 | if (out.file) { | |
1352 | error = __do_splice(in.file, off_in, out.file, off_out, | |
1353 | len, flags); | |
1354 | fdput(out); | |
1355 | } | |
1356 | fdput(in); | |
1357 | } | |
1358 | return error; | |
1359 | } | |
1360 | ||
1361 | /* | |
1362 | * Make sure there's data to read. Wait for input if we can, otherwise | |
1363 | * return an appropriate error. | |
1364 | */ | |
1365 | static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) | |
1366 | { | |
1367 | int ret; | |
1368 | ||
1369 | /* | |
1370 | * Check the pipe occupancy without the inode lock first. This function | |
1371 | * is speculative anyways, so missing one is ok. | |
1372 | */ | |
1373 | if (!pipe_empty(pipe->head, pipe->tail)) | |
1374 | return 0; | |
1375 | ||
1376 | ret = 0; | |
1377 | pipe_lock(pipe); | |
1378 | ||
1379 | while (pipe_empty(pipe->head, pipe->tail)) { | |
1380 | if (signal_pending(current)) { | |
1381 | ret = -ERESTARTSYS; | |
1382 | break; | |
1383 | } | |
1384 | if (!pipe->writers) | |
1385 | break; | |
1386 | if (flags & SPLICE_F_NONBLOCK) { | |
1387 | ret = -EAGAIN; | |
1388 | break; | |
1389 | } | |
1390 | pipe_wait_readable(pipe); | |
1391 | } | |
1392 | ||
1393 | pipe_unlock(pipe); | |
1394 | return ret; | |
1395 | } | |
1396 | ||
1397 | /* | |
1398 | * Make sure there's writeable room. Wait for room if we can, otherwise | |
1399 | * return an appropriate error. | |
1400 | */ | |
1401 | static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) | |
1402 | { | |
1403 | int ret; | |
1404 | ||
1405 | /* | |
1406 | * Check pipe occupancy without the inode lock first. This function | |
1407 | * is speculative anyways, so missing one is ok. | |
1408 | */ | |
1409 | if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) | |
1410 | return 0; | |
1411 | ||
1412 | ret = 0; | |
1413 | pipe_lock(pipe); | |
1414 | ||
1415 | while (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { | |
1416 | if (!pipe->readers) { | |
1417 | send_sig(SIGPIPE, current, 0); | |
1418 | ret = -EPIPE; | |
1419 | break; | |
1420 | } | |
1421 | if (flags & SPLICE_F_NONBLOCK) { | |
1422 | ret = -EAGAIN; | |
1423 | break; | |
1424 | } | |
1425 | if (signal_pending(current)) { | |
1426 | ret = -ERESTARTSYS; | |
1427 | break; | |
1428 | } | |
1429 | pipe_wait_writable(pipe); | |
1430 | } | |
1431 | ||
1432 | pipe_unlock(pipe); | |
1433 | return ret; | |
1434 | } | |
1435 | ||
/*
 * Splice contents of ipipe to opipe.  Whole buffers are moved across;
 * a final partial buffer is shared by taking an extra page reference
 * and splitting offset/len between the two pipes.
 */
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	unsigned int i_head, o_head;
	unsigned int i_tail, o_tail;
	unsigned int i_mask, o_mask;
	int ret = 0;
	bool input_wakeup = false;


retry:
	/* Wait (outside the locks) for input data and output room. */
	ret = ipipe_prep(ipipe, flags);
	if (ret)
		return ret;

	ret = opipe_prep(opipe, flags);
	if (ret)
		return ret;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	i_tail = ipipe->tail;
	i_mask = ipipe->ring_size - 1;
	o_head = opipe->head;
	o_mask = opipe->ring_size - 1;

	do {
		size_t o_len;

		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/* Re-read the ends other tasks may have advanced. */
		i_head = ipipe->head;
		o_tail = opipe->tail;

		/* Input drained and no writers left: EOF. */
		if (pipe_empty(i_head, i_tail) && !ipipe->writers)
			break;

		/*
		 * Cannot make any progress, because either the input
		 * pipe is empty or the output pipe is full.
		 */
		if (pipe_empty(i_head, i_tail) ||
		    pipe_full(o_head, o_tail, opipe->max_usage)) {
			/* Already processed some buffers, break */
			if (ret)
				break;

			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * We raced with another reader/writer and haven't
			 * managed to process any buffers. A zero return
			 * value means EOF, so retry instead.
			 */
			pipe_unlock(ipipe);
			pipe_unlock(opipe);
			goto retry;
		}

		ibuf = &ipipe->bufs[i_tail & i_mask];
		obuf = &opipe->bufs[o_head & o_mask];

		if (len >= ibuf->len) {
			/*
			 * Simply move the whole buffer from ipipe to opipe
			 */
			*obuf = *ibuf;
			ibuf->ops = NULL;	/* ownership moved to opipe */
			i_tail++;
			ipipe->tail = i_tail;
			input_wakeup = true;	/* freed input room */
			o_len = obuf->len;
			o_head++;
			opipe->head = o_head;
		} else {
			/*
			 * Get a reference to this pipe buffer,
			 * so we can copy the contents over.
			 */
			if (!pipe_buf_get(ipipe, ibuf)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			*obuf = *ibuf;

			/*
			 * Don't inherit the gift and merge flags, we need to
			 * prevent multiple steals of this page.
			 */
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE;

			/* Split: first 'len' bytes go out, rest stays in. */
			obuf->len = len;
			ibuf->offset += len;
			ibuf->len -= len;
			o_len = len;
			o_head++;
			opipe->head = o_head;
		}
		ret += o_len;
		len -= o_len;
	} while (len);

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	if (input_wakeup)
		wakeup_pipe_writers(ipipe);

	return ret;
}
1572 | ||
/*
 * Link contents of ipipe to opipe: make opipe's buffers reference the
 * same pages as ipipe's, without consuming ipipe's data (tee(2)).
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	unsigned int i_head, o_head;
	unsigned int i_tail, o_tail;
	unsigned int i_mask, o_mask;
	int ret = 0;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	i_tail = ipipe->tail;
	i_mask = ipipe->ring_size - 1;
	o_head = opipe->head;
	o_mask = opipe->ring_size - 1;

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		i_head = ipipe->head;
		o_tail = opipe->tail;

		/*
		 * If we have iterated all input buffers or run out of
		 * output room, break.
		 */
		if (pipe_empty(i_head, i_tail) ||
		    pipe_full(o_head, o_tail, opipe->max_usage))
			break;

		ibuf = &ipipe->bufs[i_tail & i_mask];
		obuf = &opipe->bufs[o_head & o_mask];

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		if (!pipe_buf_get(ipipe, ibuf)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		*obuf = *ibuf;

		/*
		 * Don't inherit the gift and merge flag, we need to prevent
		 * multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
		obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE;

		/* Clamp the duplicated buffer to the bytes still wanted. */
		if (obuf->len > len)
			obuf->len = len;
		ret += obuf->len;
		len -= obuf->len;

		/*
		 * Advance the output ring for real; the input tail is only
		 * advanced locally, so ipipe's data is not consumed.
		 */
		o_head++;
		opipe->head = o_head;
		i_tail++;
	} while (len);

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	return ret;
}
1660 | ||
1661 | /* | |
1662 | * This is a tee(1) implementation that works on pipes. It doesn't copy | |
1663 | * any data, it simply references the 'in' pages on the 'out' pipe. | |
1664 | * The 'flags' used are the SPLICE_F_* variants, currently the only | |
1665 | * applicable one is SPLICE_F_NONBLOCK. | |
1666 | */ | |
1667 | long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) | |
1668 | { | |
1669 | struct pipe_inode_info *ipipe = get_pipe_info(in, true); | |
1670 | struct pipe_inode_info *opipe = get_pipe_info(out, true); | |
1671 | int ret = -EINVAL; | |
1672 | ||
1673 | if (unlikely(!(in->f_mode & FMODE_READ) || | |
1674 | !(out->f_mode & FMODE_WRITE))) | |
1675 | return -EBADF; | |
1676 | ||
1677 | /* | |
1678 | * Duplicate the contents of ipipe to opipe without actually | |
1679 | * copying the data. | |
1680 | */ | |
1681 | if (ipipe && opipe && ipipe != opipe) { | |
1682 | if ((in->f_flags | out->f_flags) & O_NONBLOCK) | |
1683 | flags |= SPLICE_F_NONBLOCK; | |
1684 | ||
1685 | /* | |
1686 | * Keep going, unless we encounter an error. The ipipe/opipe | |
1687 | * ordering doesn't really matter. | |
1688 | */ | |
1689 | ret = ipipe_prep(ipipe, flags); | |
1690 | if (!ret) { | |
1691 | ret = opipe_prep(opipe, flags); | |
1692 | if (!ret) | |
1693 | ret = link_pipe(ipipe, opipe, len, flags); | |
1694 | } | |
1695 | } | |
1696 | ||
1697 | return ret; | |
1698 | } | |
1699 | ||
1700 | SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags) | |
1701 | { | |
1702 | struct fd in, out; | |
1703 | int error; | |
1704 | ||
1705 | if (unlikely(flags & ~SPLICE_F_ALL)) | |
1706 | return -EINVAL; | |
1707 | ||
1708 | if (unlikely(!len)) | |
1709 | return 0; | |
1710 | ||
1711 | error = -EBADF; | |
1712 | in = fdget(fdin); | |
1713 | if (in.file) { | |
1714 | out = fdget(fdout); | |
1715 | if (out.file) { | |
1716 | error = do_tee(in.file, out.file, len, flags); | |
1717 | fdput(out); | |
1718 | } | |
1719 | fdput(in); | |
1720 | } | |
1721 | ||
1722 | return error; | |
1723 | } |