// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * empty.
 */
#define PIPE_MIN_DEF_BUFFERS 2
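/*
 * Illustrative userspace sketch (not part of this file): the jobserver
 * pattern referenced above uses the pipe itself as a counting semaphore.
 * Each worker reads one token byte to acquire a job slot and writes it
 * back to release it, so several writers may be queued on a pipe that is
 * not full (jobserver_rfd/jobserver_wfd are placeholder descriptors):
 *
 *        char token;
 *        read(jobserver_rfd, &token, 1);     // acquire a slot (may block)
 *        // ... run one job ...
 *        write(jobserver_wfd, &token, 1);    // release the slot
 */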
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
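/*
 * With the default build-time values (PIPE_DEF_BUFFERS = 16,
 * INR_OPEN_CUR = 1024) the soft limit above works out to 16384 pages
 * per user, i.e. 64 MiB with 4 KiB pages.
 */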
/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * the counters have to be the same size.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
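/*
 * Illustrative sketch (not kernel code): with a power-of-two ring, head
 * and tail wrap naturally as unsigned ints and are only masked when the
 * buffer array is dereferenced, so head - tail gives the occupancy even
 * across the wrap:
 *
 *        unsigned int ring_size = 8, mask = ring_size - 1;
 *        unsigned int tail = 4294967294u;              // UINT_MAX - 1
 *        unsigned int head = tail + 3;                 // wraps to 1
 *        unsigned int occupancy = head - tail;         // still 3
 *        struct pipe_buffer *buf = &bufs[tail & mask]; // masked only here
 */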
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
        mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
        /*
         * pipe_lock() nests non-pipe inode locks (for writing to a file)
         */
        pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
        mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
        mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
        mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
                      struct pipe_inode_info *pipe2)
{
        BUG_ON(pipe1 == pipe2);

        if (pipe1 < pipe2) {
                pipe_lock_nested(pipe1, I_MUTEX_PARENT);
                pipe_lock_nested(pipe2, I_MUTEX_CHILD);
        } else {
                pipe_lock_nested(pipe2, I_MUTEX_PARENT);
                pipe_lock_nested(pipe1, I_MUTEX_CHILD);
        }
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * If nobody else uses this page, and we don't already have a
         * temporary page, let's keep track of it as a one-deep
         * allocation cache. (Otherwise just release our reference to it)
         */
        if (page_count(page) == 1 && !pipe->tmp_page)
                pipe->tmp_page = page;
        else
                put_page(page);
}
static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        if (page_count(page) != 1)
                return false;
        memcg_kmem_uncharge_page(page, 0);
        __SetPageLocked(page);
        return true;
}
/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to attempt to steal
 *
 * Description:
 *      This function attempts to steal the &struct page attached to
 *      @buf. If successful, this function returns true and returns with
 *      the page locked. The caller may then reuse the page for whatever
 *      they wish; the typical use is insertion into a different file
 *      page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
                struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * A reference of one is golden, that means that the owner of this
         * page is the only one holding a reference to it. lock the page
         * and return OK.
         */
        if (page_count(page) == 1) {
                lock_page(page);
                return true;
        }
        return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to get a reference to
 *
 * Description:
 *      This function grabs an extra reference to @buf. It's used in
 *      the tee() system call, when we duplicate the buffers in one
 *      pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
        return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to put a reference to
 *
 * Description:
 *      This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .release        = anon_pipe_buf_release,
        .try_steal      = anon_pipe_buf_try_steal,
        .get            = generic_pipe_buf_get,
};
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
        unsigned int head = READ_ONCE(pipe->head);
        unsigned int tail = READ_ONCE(pipe->tail);
        unsigned int writers = READ_ONCE(pipe->writers);

        return !pipe_empty(head, tail) || !writers;
}
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
        size_t total_len = iov_iter_count(to);
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        bool was_full, wake_next_reader = false;
        ssize_t ret;

        /* Null read succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        ret = 0;
        __pipe_lock(pipe);

        /*
         * We only wake up writers if the pipe was full when we started
         * reading in order to avoid unnecessary wakeups.
         *
         * But when we do wake up writers, we do so using a sync wakeup
         * (WF_SYNC), because we want them to get going and generate more
         * data for us.
         */
        was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
        for (;;) {
                /* Read ->head with a barrier vs post_one_notification() */
                unsigned int head = smp_load_acquire(&pipe->head);
                unsigned int tail = pipe->tail;
                unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
                if (pipe->note_loss) {
                        struct watch_notification n;

                        if (total_len < 8) {
                                if (ret == 0)
                                        ret = -ENOBUFS;
                                break;
                        }

                        n.type = WATCH_TYPE_META;
                        n.subtype = WATCH_META_LOSS_NOTIFICATION;
                        n.info = watch_sizeof(n);
                        if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
                                if (ret == 0)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += sizeof(n);
                        total_len -= sizeof(n);
                        pipe->note_loss = false;
                }
#endif

                if (!pipe_empty(head, tail)) {
                        struct pipe_buffer *buf = &pipe->bufs[tail & mask];
                        size_t chars = buf->len;
                        size_t written;
                        int error;

                        if (chars > total_len) {
                                if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
                                        if (ret == 0)
                                                ret = -ENOBUFS;
                                        break;
                                }
                                chars = total_len;
                        }

                        error = pipe_buf_confirm(pipe, buf);
                        if (error) {
                                if (!ret)
                                        ret = error;
                                break;
                        }

                        written = copy_page_to_iter(buf->page, buf->offset, chars, to);
                        if (unlikely(written < chars)) {
                                if (!ret)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += chars;
                        buf->offset += chars;
                        buf->len -= chars;

                        /* Was it a packet buffer? Clean up and exit */
                        if (buf->flags & PIPE_BUF_FLAG_PACKET) {
                                total_len = chars;
                                buf->len = 0;
                        }

                        if (!buf->len) {
                                pipe_buf_release(pipe, buf);
                                spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
                                if (buf->flags & PIPE_BUF_FLAG_LOSS)
                                        pipe->note_loss = true;
#endif
                                tail++;
                                pipe->tail = tail;
                                spin_unlock_irq(&pipe->rd_wait.lock);
                        }
                        total_len -= chars;
                        if (!total_len)
                                break;  /* common path: read succeeded */
                        if (!pipe_empty(head, tail))    /* More to do? */
                                continue;
                }

                if (!pipe->writers)
                        break;
                if (ret)
                        break;
                if (filp->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }
                __pipe_unlock(pipe);

                /*
                 * We only get here if we didn't actually read anything.
                 *
                 * However, we could have seen (and removed) a zero-sized
                 * pipe buffer, and might have made space in the buffers
                 * that way.
                 *
                 * You can't make zero-sized pipe buffers by doing an empty
                 * write (not even in packet mode), but they can happen if
                 * the writer gets an EFAULT when trying to fill a buffer
                 * that already got allocated and inserted in the buffer
                 * array.
                 *
                 * So we still need to wake up any pending writers in the
                 * _very_ unlikely case that the pipe was full, but we got
                 * no data.
                 */
                if (unlikely(was_full))
                        wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

                /*
                 * But because we didn't read anything, at this point we can
                 * just return directly with -ERESTARTSYS if we're interrupted,
                 * since we've done any required wakeups and there's no need
                 * to mark anything accessed. And we've dropped the lock.
                 */
                if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
                        return -ERESTARTSYS;

                __pipe_lock(pipe);
                was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
                wake_next_reader = true;
        }
        if (pipe_empty(pipe->head, pipe->tail))
                wake_next_reader = false;
        __pipe_unlock(pipe);

        if (was_full)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
        if (wake_next_reader)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        if (ret > 0)
                file_accessed(filp);
        return ret;
}
static inline int is_packetized(struct file *file)
{
        return (file->f_flags & O_DIRECT) != 0;
}
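/*
 * Userspace sketch (illustrative): opening a pipe with O_DIRECT selects
 * "packet" mode. Each write() then becomes one packet, and a read()
 * returns at most one packet, discarding any tail it did not consume:
 *
 *        int fds[2];
 *        pipe2(fds, O_DIRECT);
 *        write(fds[1], "hello", 5);
 *        write(fds[1], "world", 5);
 *        char buf[64];
 *        ssize_t n = read(fds[0], buf, sizeof(buf)); // 5 ("hello"), not 10
 */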
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
        unsigned int head = READ_ONCE(pipe->head);
        unsigned int tail = READ_ONCE(pipe->tail);
        unsigned int max_usage = READ_ONCE(pipe->max_usage);

        return !pipe_full(head, tail, max_usage) ||
                !READ_ONCE(pipe->readers);
}
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head;
        ssize_t ret = 0;
        size_t total_len = iov_iter_count(from);
        ssize_t chars;
        bool was_empty = false;
        bool wake_next_writer = false;

        /* Null write succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        __pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                ret = -EPIPE;
                goto out;
        }

#ifdef CONFIG_WATCH_QUEUE
        if (pipe->watch_queue) {
                ret = -EXDEV;
                goto out;
        }
#endif

        /*
         * If it wasn't empty we try to merge new data into
         * the last buffer.
         *
         * That naturally merges small writes, but it also
         * page-aligns the rest of the writes for large writes
         * spanning multiple pages.
         */
        head = pipe->head;
        was_empty = pipe_empty(head, pipe->tail);
        chars = total_len & (PAGE_SIZE-1);
        if (chars && !was_empty) {
                unsigned int mask = pipe->ring_size - 1;
                struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
                int offset = buf->offset + buf->len;

                if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
                    offset + chars <= PAGE_SIZE) {
                        ret = pipe_buf_confirm(pipe, buf);
                        if (ret)
                                goto out;

                        ret = copy_page_from_iter(buf->page, offset, chars, from);
                        if (unlikely(ret < chars)) {
                                ret = -EFAULT;
                                goto out;
                        }

                        buf->len += ret;
                        if (!iov_iter_count(from))
                                goto out;
                }
        }

        for (;;) {
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                head = pipe->head;
                if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
                        unsigned int mask = pipe->ring_size - 1;
                        struct pipe_buffer *buf = &pipe->bufs[head & mask];
                        struct page *page = pipe->tmp_page;
                        int copied;

                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
                                if (unlikely(!page)) {
                                        ret = ret ? : -ENOMEM;
                                        break;
                                }
                                pipe->tmp_page = page;
                        }

                        /* Allocate a slot in the ring in advance and attach an
                         * empty buffer. If we fault or otherwise fail to use
                         * it, either the reader will consume it or it'll still
                         * be there for the next write.
                         */
                        spin_lock_irq(&pipe->rd_wait.lock);

                        head = pipe->head;
                        if (pipe_full(head, pipe->tail, pipe->max_usage)) {
                                spin_unlock_irq(&pipe->rd_wait.lock);
                                continue;
                        }

                        pipe->head = head + 1;
                        spin_unlock_irq(&pipe->rd_wait.lock);

                        /* Insert it into the buffer array */
                        buf = &pipe->bufs[head & mask];
                        buf->page = page;
                        buf->ops = &anon_pipe_buf_ops;
                        buf->offset = 0;
                        buf->len = 0;
                        if (is_packetized(filp))
                                buf->flags = PIPE_BUF_FLAG_PACKET;
                        else
                                buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
                        pipe->tmp_page = NULL;

                        copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
                        if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
                                if (!ret)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += copied;
                        buf->len = copied;

                        if (!iov_iter_count(from))
                                break;
                }

                if (!pipe_full(head, pipe->tail, pipe->max_usage))
                        continue;

                /* Wait for buffer space to become available. */
                if (filp->f_flags & O_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                /*
                 * We're going to release the pipe lock and wait for more
                 * space. We wake up any readers if necessary, and then
                 * after waiting we need to re-check whether the pipe
                 * became empty while we dropped the lock.
                 */
                __pipe_unlock(pipe);
                if (was_empty)
                        wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
                __pipe_lock(pipe);
                was_empty = pipe_empty(pipe->head, pipe->tail);
                wake_next_writer = true;
        }
out:
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                wake_next_writer = false;
        __pipe_unlock(pipe);

        /*
         * If we do a wakeup event, we do a 'sync' wakeup, because we
         * want the reader to start processing things asap, rather than
         * leave the data pending.
         *
         * This is particularly important for small writes, because of
         * how (for example) the GNU make jobserver uses small writes to
         * wake up pending jobs.
         *
         * Epoll nonsensically wants a wakeup whether the pipe
         * was already empty or not.
         */
        if (was_empty || pipe->poll_usage)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        if (wake_next_writer)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
                int err = file_update_time(filp);
                if (err)
                        ret = err;
                sb_end_write(file_inode(filp)->i_sb);
        }
        return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int count, head, tail, mask;

        switch (cmd) {
        case FIONREAD:
                __pipe_lock(pipe);
                count = 0;
                head = pipe->head;
                tail = pipe->tail;
                mask = pipe->ring_size - 1;

                while (tail != head) {
                        count += pipe->bufs[tail & mask].len;
                        tail++;
                }
                __pipe_unlock(pipe);

                return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
        case IOC_WATCH_QUEUE_SET_SIZE: {
                int ret;
                __pipe_lock(pipe);
                ret = watch_queue_set_size(pipe, arg);
                __pipe_unlock(pipe);
                return ret;
        }

        case IOC_WATCH_QUEUE_SET_FILTER:
                return watch_queue_set_filter(
                        pipe, (struct watch_notification_filter __user *)arg);
#endif

        default:
                return -ENOIOCTLCMD;
        }
}
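/*
 * Userspace sketch (illustrative): the FIONREAD case above reports the
 * number of unread bytes currently buffered in the pipe (fds[0] being
 * the read end of an open pipe):
 *
 *        int unread;
 *        ioctl(fds[0], FIONREAD, &unread);
 */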
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
        __poll_t mask;
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head, tail;

        /* Epoll has some historical nasty semantics, this enables them */
        WRITE_ONCE(pipe->poll_usage, true);

        /*
         * Reading pipe state only -- no need for acquiring the semaphore.
         *
         * But because this is racy, the code has to add the
         * entry to the poll table _first_ ..
         */
        if (filp->f_mode & FMODE_READ)
                poll_wait(filp, &pipe->rd_wait, wait);
        if (filp->f_mode & FMODE_WRITE)
                poll_wait(filp, &pipe->wr_wait, wait);

        /*
         * .. and only then can you do the racy tests. That way,
         * if something changes and you got it wrong, the poll
         * table entry will wake you up and fix it.
         */
        head = READ_ONCE(pipe->head);
        tail = READ_ONCE(pipe->tail);

        mask = 0;
        if (filp->f_mode & FMODE_READ) {
                if (!pipe_empty(head, tail))
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
                        mask |= EPOLLHUP;
        }

        if (filp->f_mode & FMODE_WRITE) {
                if (!pipe_full(head, tail, pipe->max_usage))
                        mask |= EPOLLOUT | EPOLLWRNORM;
                /*
                 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
                if (!pipe->readers)
                        mask |= EPOLLERR;
        }

        return mask;
}
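/*
 * Userspace sketch (illustrative): a reader typically polls the read end
 * for POLLIN; once every writer has closed, poll() reports POLLHUP:
 *
 *        char buf[256];
 *        struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *                read(fds[0], buf, sizeof(buf));
 */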
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
        int kill = 0;

        spin_lock(&inode->i_lock);
        if (!--pipe->files) {
                inode->i_pipe = NULL;
                kill = 1;
        }
        spin_unlock(&inode->i_lock);

        if (kill)
                free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
                pipe->readers--;
        if (file->f_mode & FMODE_WRITE)
                pipe->writers--;

        /* Was that the last reader or writer, but not the other side? */
        if (!pipe->readers != !pipe->writers) {
                wake_up_interruptible_all(&pipe->rd_wait);
                wake_up_interruptible_all(&pipe->wr_wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int retval = 0;

        __pipe_lock(pipe);
        if (filp->f_mode & FMODE_READ)
                retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
        if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
                retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
                if (retval < 0 && (filp->f_mode & FMODE_READ))
                        /* this can happen only if on == T */
                        fasync_helper(-1, filp, 0, &pipe->fasync_readers);
        }
        __pipe_unlock(pipe);
        return retval;
}
unsigned long account_pipe_buffers(struct user_struct *user,
                                   unsigned long old, unsigned long new)
{
        return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
        unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

        return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
        unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

        return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
        return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
struct pipe_inode_info *alloc_pipe_info(void)
{
        struct pipe_inode_info *pipe;
        unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
        struct user_struct *user = get_current_user();
        unsigned long user_bufs;
        unsigned int max_size = READ_ONCE(pipe_max_size);

        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
        if (pipe == NULL)
                goto out_free_uid;

        if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
                pipe_bufs = max_size >> PAGE_SHIFT;

        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

        if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
                user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
                pipe_bufs = PIPE_MIN_DEF_BUFFERS;
        }

        if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
                goto out_revert_acct;

        pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
                             GFP_KERNEL_ACCOUNT);

        if (pipe->bufs) {
                init_waitqueue_head(&pipe->rd_wait);
                init_waitqueue_head(&pipe->wr_wait);
                pipe->r_counter = pipe->w_counter = 1;
                pipe->max_usage = pipe_bufs;
                pipe->ring_size = pipe_bufs;
                pipe->nr_accounted = pipe_bufs;
                pipe->user = user;
                mutex_init(&pipe->mutex);
                return pipe;
        }

out_revert_acct:
        (void) account_pipe_buffers(user, pipe_bufs, 0);
        kfree(pipe);
out_free_uid:
        free_uid(user);
        return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
        unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
        if (pipe->watch_queue)
                watch_queue_clear(pipe->watch_queue);
#endif

        (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
        free_uid(pipe->user);
        for (i = 0; i < pipe->ring_size; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
                if (buf->ops)
                        pipe_buf_release(pipe, buf);
        }
#ifdef CONFIG_WATCH_QUEUE
        if (pipe->watch_queue)
                put_watch_queue(pipe->watch_queue);
#endif
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
        kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
                                d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
        .d_dname        = pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
        struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
        struct pipe_inode_info *pipe;

        if (!inode)
                goto fail_inode;

        inode->i_ino = get_next_ino();

        pipe = alloc_pipe_info();
        if (!pipe)
                goto fail_iput;

        inode->i_pipe = pipe;
        pipe->files = 1;
        pipe->readers = pipe->writers = 1;
        inode->i_fop = &pipefifo_fops;

        /*
         * Mark the inode dirty from the very beginning,
         * that way it will never be moved to the dirty
         * list because "mark_inode_dirty()" will think
         * that it already _is_ on the dirty list.
         */
        inode->i_state = I_DIRTY;
        inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

        return inode;

fail_iput:
        iput(inode);

fail_inode:
        return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
        struct inode *inode = get_pipe_inode();
        struct file *f;
        int error;

        if (!inode)
                return -ENFILE;

        if (flags & O_NOTIFICATION_PIPE) {
                error = watch_queue_init(inode->i_pipe);
                if (error) {
                        free_pipe_info(inode->i_pipe);
                        iput(inode);
                        return error;
                }
        }

        f = alloc_file_pseudo(inode, pipe_mnt, "",
                                O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
                                &pipefifo_fops);
        if (IS_ERR(f)) {
                free_pipe_info(inode->i_pipe);
                iput(inode);
                return PTR_ERR(f);
        }

        f->private_data = inode->i_pipe;

        res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
                                  &pipefifo_fops);
        if (IS_ERR(res[0])) {
                put_pipe_info(inode, inode->i_pipe);
                fput(f);
                return PTR_ERR(res[0]);
        }
        res[0]->private_data = inode->i_pipe;
        res[1] = f;
        stream_open(inode, res[0]);
        stream_open(inode, res[1]);
        return 0;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
        int error;
        int fdw, fdr;

        if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
                return -EINVAL;

        error = create_pipe_files(files, flags);
        if (error)
                return error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_read_pipe;
        fdr = error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_fdr;
        fdw = error;

        audit_fd_pair(fdr, fdw);
        fd[0] = fdr;
        fd[1] = fdw;
        return 0;

 err_fdr:
        put_unused_fd(fdr);
 err_read_pipe:
        fput(files[0]);
        fput(files[1]);
        return error;
}
int do_pipe_flags(int *fd, int flags)
{
        struct file *files[2];
        int error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                fd_install(fd[0], files[0]);
                fd_install(fd[1], files[1]);
        }
        return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
        struct file *files[2];
        int fd[2];
        int error;

        error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
                        fput(files[0]);
                        fput(files[1]);
                        put_unused_fd(fd[0]);
                        put_unused_fd(fd[1]);
                        error = -EFAULT;
                } else {
                        fd_install(fd[0], files[0]);
                        fd_install(fd[1], files[1]);
                }
        }
        return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
        return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
        return do_pipe2(fildes, 0);
}
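/*
 * Userspace sketch (illustrative): pipe2() is pipe() plus the flags word
 * validated in __do_pipe_flags() above:
 *
 *        int fds[2];
 *        if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *                // fds[0] is the read end, fds[1] the write end
 *        }
 */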
/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
        pipe_unlock(pipe);
        wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
        pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
        pipe_unlock(pipe);
        wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
        pipe_lock(pipe);
}
/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
        DEFINE_WAIT(rdwait);
        int cur = *cnt;

        while (cur == *cnt) {
                prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
                pipe_unlock(pipe);
                schedule();
                finish_wait(&pipe->rd_wait, &rdwait);
                pipe_lock(pipe);
                if (signal_pending(current))
                        break;
        }
        return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
        wake_up_interruptible_all(&pipe->rd_wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
        struct pipe_inode_info *pipe;
        bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
        int ret;

        filp->f_version = 0;

        spin_lock(&inode->i_lock);
        if (inode->i_pipe) {
                pipe = inode->i_pipe;
                pipe->files++;
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                pipe = alloc_pipe_info();
                if (!pipe)
                        return -ENOMEM;
                pipe->files = 1;
                spin_lock(&inode->i_lock);
                if (unlikely(inode->i_pipe)) {
                        inode->i_pipe->files++;
                        spin_unlock(&inode->i_lock);
                        free_pipe_info(pipe);
                        pipe = inode->i_pipe;
                } else {
                        inode->i_pipe = pipe;
                        spin_unlock(&inode->i_lock);
                }
        }
        filp->private_data = pipe;
        /* OK, we have a pipe and it's pinned down */

        __pipe_lock(pipe);

        /* We can only do regular read/write on fifos */
        stream_open(inode, filp);

        switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
        case FMODE_READ:
        /*
         *  O_RDONLY
         *  POSIX.1 says that O_NONBLOCK means return with the FIFO
         *  opened, even when there is no process writing the FIFO.
         */
                pipe->r_counter++;
                if (pipe->readers++ == 0)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->writers) {
                        if ((filp->f_flags & O_NONBLOCK)) {
                                /* suppress EPOLLHUP until we have
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
                                if (wait_for_partner(pipe, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
                break;

        case FMODE_WRITE:
        /*
         *  O_WRONLY
         *  POSIX.1 says that O_NONBLOCK means return -1 with
         *  errno=ENXIO when there is no process reading the FIFO.
         */
                ret = -ENXIO;
                if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
                        goto err;

                pipe->w_counter++;
                if (!pipe->writers++)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->readers) {
                        if (wait_for_partner(pipe, &pipe->r_counter))
                                goto err_wr;
                }
                break;

        case FMODE_READ | FMODE_WRITE:
        /*
         *  O_RDWR
         *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
         *  This implementation will NEVER block on a O_RDWR open, since
         *  the process can at least talk to itself.
         */

                pipe->readers++;
                pipe->writers++;
                pipe->r_counter++;
                pipe->w_counter++;
                if (pipe->readers == 1 || pipe->writers == 1)
                        wake_up_partner(pipe);
                break;

        default:
                ret = -EINVAL;
                goto err;
        }

        /* Ok! */
        __pipe_unlock(pipe);
        return 0;

err_rd:
        if (!--pipe->readers)
                wake_up_interruptible(&pipe->wr_wait);
        ret = -ERESTARTSYS;
        goto err;

err_wr:
        if (!--pipe->writers)
                wake_up_interruptible_all(&pipe->rd_wait);
        ret = -ERESTARTSYS;
        goto err;

err:
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return ret;
}
const struct file_operations pipefifo_fops = {
        .open           = fifo_open,
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
        .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
        .splice_write   = iter_file_splice_write,
};
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
        if (size > (1U << 31))
                return 0;

        /* Minimum pipe size, as required by POSIX */
        if (size < PAGE_SIZE)
                return PAGE_SIZE;

        return roundup_pow_of_two(size);
}
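/*
 * Worked examples (assuming 4 KiB pages): round_pipe_size(1) returns
 * 4096, round_pipe_size(6000) returns 8192, and a request above 2 GiB,
 * e.g. round_pipe_size((1UL << 31) + 1), returns 0.
 */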
/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
        struct pipe_buffer *bufs;
        unsigned int head, tail, mask, n;

        bufs = kcalloc(nr_slots, sizeof(*bufs),
                       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (unlikely(!bufs))
                return -ENOMEM;

        spin_lock_irq(&pipe->rd_wait.lock);
        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;

        n = pipe_occupancy(head, tail);
        if (nr_slots < n) {
                spin_unlock_irq(&pipe->rd_wait.lock);
                kfree(bufs);
                return -EBUSY;
        }

        /*
         * The pipe array wraps around, so just start the new one at zero
         * and adjust the indices.
         */
        if (n > 0) {
                unsigned int h = head & mask;
                unsigned int t = tail & mask;
                if (h > t) {
                        memcpy(bufs, pipe->bufs + t,
                               n * sizeof(struct pipe_buffer));
                } else {
                        unsigned int tsize = pipe->ring_size - t;
                        if (h > 0)
                                memcpy(bufs + tsize, pipe->bufs,
                                       h * sizeof(struct pipe_buffer));
                        memcpy(bufs, pipe->bufs + t,
                               tsize * sizeof(struct pipe_buffer));
                }
        }

        head = n;
        tail = 0;

        kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->ring_size = nr_slots;
        if (pipe->max_usage > nr_slots)
                pipe->max_usage = nr_slots;
        pipe->tail = tail;
        pipe->head = head;

        spin_unlock_irq(&pipe->rd_wait.lock);

        /* This might have made more room for writers */
        wake_up_interruptible(&pipe->wr_wait);
        return 0;
}
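/*
 * Worked example (illustrative): with ring_size = 8, tail = 6 and
 * head = 10, the four occupied slots are bufs[6], bufs[7], bufs[0] and
 * bufs[1] (t = 6, h = 2, so h <= t). The two copies above move
 * bufs[6..7] to the start of the new array and bufs[0..1] right after
 * them; tail = 0 and head = 4 then index the same data without a wrap.
 */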
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
        unsigned long user_bufs;
        unsigned int nr_slots, size;
        long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
        if (pipe->watch_queue)
                return -EBUSY;
#endif

        size = round_pipe_size(arg);
        nr_slots = size >> PAGE_SHIFT;

        if (!nr_slots)
                return -EINVAL;

        /*
         * If trying to increase the pipe capacity, check that an
         * unprivileged user is not trying to exceed various limits
         * (soft limit check here, hard limit check just below).
         * Decreasing the pipe capacity is always permitted, even
         * if the user is currently over a limit.
         */
        if (nr_slots > pipe->max_usage &&
                        size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
                return -EPERM;

        user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

        if (nr_slots > pipe->max_usage &&
                        (too_many_pipe_buffers_hard(user_bufs) ||
                         too_many_pipe_buffers_soft(user_bufs)) &&
                        pipe_is_unprivileged_user()) {
                ret = -EPERM;
                goto out_revert_acct;
        }

        ret = pipe_resize_ring(pipe, nr_slots);
        if (ret < 0)
                goto out_revert_acct;

        pipe->max_usage = nr_slots;
        pipe->nr_accounted = nr_slots;
        return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
        (void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
        return ret;
}
/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
        struct pipe_inode_info *pipe = file->private_data;

        if (file->f_op != &pipefifo_fops || !pipe)
                return NULL;
#ifdef CONFIG_WATCH_QUEUE
        if (for_splice && pipe->watch_queue)
                return NULL;
#endif
        return pipe;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe;
        long ret;

        pipe = get_pipe_info(file, false);
        if (!pipe)
                return -EBADF;

        __pipe_lock(pipe);

        switch (cmd) {
        case F_SETPIPE_SZ:
                ret = pipe_set_size(pipe, arg);
                break;
        case F_GETPIPE_SZ:
                ret = pipe->max_usage * PAGE_SIZE;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        __pipe_unlock(pipe);
        return ret;
}
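/*
 * Userspace sketch (illustrative): pipe capacity is queried and resized
 * through fcntl(); the value returned is always a whole number of pages:
 *
 *        long size = fcntl(fds[1], F_GETPIPE_SZ);   // default 64 KiB
 *        fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);  // request 1 MiB
 */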
static const struct super_operations pipefs_ops = {
        .destroy_inode = free_inode_nonrcu,
        .statfs = simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we don't
 * need any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
        if (!ctx)
                return -ENOMEM;
        ctx->ops = &pipefs_ops;
        ctx->dops = &pipefs_dentry_operations;
        return 0;
}
static struct file_system_type pipe_fs_type = {
        .name           = "pipefs",
        .init_fs_context = pipefs_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
        int err = register_filesystem(&pipe_fs_type);

        if (!err) {
                pipe_mnt = kern_mount(&pipe_fs_type);
                if (IS_ERR(pipe_mnt)) {
                        err = PTR_ERR(pipe_mnt);
                        unregister_filesystem(&pipe_fs_type);
                }
        }
        return err;
}

fs_initcall(init_pipe_fs);