// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The maximum size to which a non-root user may grow a pipe. Root can
 * raise the limit via /proc/sys/fs/pipe-max-size.
 */
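/*
 * For example, an administrator can raise the limit to 4 MiB with:
 *
 *	echo 4194304 > /proc/sys/fs/pipe-max-size
 */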
unsigned int pipe_max_size = 1048576;

/* Maximum number of pipe-buffer pages a single user may allocate. The hard
 * limit is unset (i.e. disabled) by default; the soft limit matches the
 * default pipe size times the default open-file limit.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
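/*
 * With the usual definitions (PIPE_DEF_BUFFERS = 16, INR_OPEN_CUR = 1024)
 * the soft default works out to 16384 pages, i.e. 64 MiB of pipe buffers
 * per user with 4 KiB pages.
 */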

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
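/*
 * Worked example of the unmasked-index scheme: with ring_size = 8 (so
 * mask = 7), head = 10 and tail = 7 describe three occupied slots,
 * bufs[7 & 7], bufs[8 & 7] and bufs[9 & 7]; the occupancy is simply
 * head - tail, which stays correct even when the indices wrap around
 * the unsigned range.
 */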

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

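/*
 * Lock two pipes in a consistent (address) order, so that two tasks locking
 * the same pair concurrently cannot deadlock against each other. Used e.g.
 * when splicing from one pipe into another.
 */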
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
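
/*
 * The prepare_to_wait()/unlock/schedule() ordering above is what makes the
 * wait atomic: once the task is on the waitqueue, a wakeup issued after
 * pipe_unlock() is not lost - it simply makes schedule() return at once.
 */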

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 * This function attempts to steal the &struct page attached to
 * @buf. If successful, this function returns 0 and returns with
 * the page locked. The caller may then reuse the page for whatever
 * he wishes; the typical use is insertion into a different file
 * page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 * This function grabs an extra reference to @buf. It's used in the
 * tee() system call, when we duplicate the buffers in one pipe into
 * another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 *
 * Description:
 * This function does nothing, because the generic pipe code uses
 * pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 * This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
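
/*
 * Note that the three operation tables above are identical in content; they
 * serve as distinct identity markers. pipe_buf_can_merge() below treats only
 * &anon_pipe_buf_ops as mergeable, so switching a buffer to the nomerge or
 * packet variant is what prevents further writes from being appended to it.
 */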

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf: the buffer to mark
 *
 * Description:
 * This function ensures that no future writes will be merged into the
 * given &struct pipe_buffer. This is necessary when multiple pipe buffers
 * share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}
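
/*
 * Note that this check (and pipe_writable() below) is only used as a wait
 * condition; the state is re-examined under the pipe lock before anything
 * acts on it, so a racy result costs at most a spurious wakeup.
 */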

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->wait.lock);
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		__pipe_unlock(pipe);
		if (was_full) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		wait_event_interruptible(pipe->wait, pipe_readable(pipe));
		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	}
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
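
/*
 * Pipes opened with O_DIRECT operate in "packet" mode: each write becomes
 * a discrete packet, and a read consumes at most one packet even when the
 * supplied buffer could hold more (see the PIPE_BUF_FLAG_PACKET handling
 * in pipe_read() above).
 */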

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * Only wake up if the pipe started out empty, since
	 * otherwise there should be no readers waiting.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
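	/*
	 * For example, two back-to-back 100-byte writes land in the same
	 * page: the second is copied in at offset 100 of the buffer the
	 * first write installed, instead of consuming a fresh page.
	 */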
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		wait_event_interruptible(pipe->wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		/* Re-read head: the local copy went stale while the lock was dropped */
		was_empty = pipe_empty(pipe->head, pipe->tail);
	}
out:
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
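
/*
 * From userspace, for example, ioctl(fd, FIONREAD, &n) stores in n the
 * number of bytes currently buffered in the pipe, i.e. what can be read
 * without blocking.
 */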

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	poll_wait(filp, &pipe->wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
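
/*
 * The helpers below compare the total returned by account_pipe_buffers()
 * against the sysctl limits; a limit of zero means "no limit".
 */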

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

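/*
 * Sleep until *cnt changes, i.e. until at least one new reader or writer
 * has opened the FIFO (the counters only ever increase), or until a
 * signal arrives.
 */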
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on an O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open = fifo_open,
	.llseek = no_llseek,
	.read_iter = pipe_read,
	.write_iter = pipe_write,
	.poll = pipe_poll,
	.unlocked_ioctl = pipe_ioctl,
	.release = pipe_release,
	.fasync = pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
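/*
 * e.g. on a system with 4 KiB pages, round_pipe_size(100000) returns
 * 131072 (the next power of two), round_pipe_size(100) is rounded up to
 * PAGE_SIZE, and any size above 2^31 yields 0.
 */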
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_slots, head, tail, mask, n;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->ring_size &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);

	if (nr_slots > pipe->ring_size &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg is greater than the ring occupancy.
	 * Since we don't expect a lot of shrink+grow operations, just free and
	 * allocate again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;
	wake_up_interruptible_all(&pipe->wait);
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
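
/*
 * From userspace these commands are reached via fcntl(2), for example:
 *
 *	long sz = fcntl(pipefd[1], F_SETPIPE_SZ, 1048576);
 *
 * which, capacity and limits permitting, returns the new buffer size in
 * bytes; fcntl(fd, F_GETPIPE_SZ) queries it without changing anything.
 */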

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name = "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);