// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

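/*
 * For scale (an illustration, not from the original source): with the
 * common 4 KiB PAGE_SIZE, the 1048576-byte default is 256 pages. A sketch
 * of how an administrator might raise it through the sysctl file named
 * above:
 *
 *        # echo 4194304 > /proc/sys/fs/pipe-max-size
 */
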
/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
        if (pipe->files)
                mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
        /*
         * pipe_lock() nests non-pipe inode locks (for writing to a file)
         */
        pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
        if (pipe->files)
                mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
        mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
        mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
                      struct pipe_inode_info *pipe2)
{
        BUG_ON(pipe1 == pipe2);

        if (pipe1 < pipe2) {
                pipe_lock_nested(pipe1, I_MUTEX_PARENT);
                pipe_lock_nested(pipe2, I_MUTEX_CHILD);
        } else {
                pipe_lock_nested(pipe2, I_MUTEX_PARENT);
                pipe_lock_nested(pipe1, I_MUTEX_CHILD);
        }
}

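/*
 * Illustrative note (not from the original source): taking the two mutexes
 * in a fixed order -- lowest kernel address first -- means two tasks that
 * each hold one pipe and want the other can never deadlock: both contend
 * for the same mutex first, so an ABBA lock cycle cannot form.
 */
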
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
        DEFINE_WAIT(wait);

        /*
         * Pipes are system-local resources, so sleeping on them
         * is considered a noninteractive wait:
         */
        prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
        pipe_unlock(pipe);
        schedule();
        finish_wait(&pipe->wait, &wait);
        pipe_lock(pipe);
}

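/*
 * Illustrative note (not from the original source): the ordering above is
 * what makes the wait atomic with dropping the lock. prepare_to_wait()
 * queues the task and marks it TASK_INTERRUPTIBLE before pipe_unlock(), so
 * a wakeup arriving between the unlock and schedule() just sets the task
 * runnable again and schedule() returns at once -- no wakeup can be lost.
 */
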
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * If nobody else uses this page, and we don't already have a
         * temporary page, let's keep track of it as a one-deep
         * allocation cache. (Otherwise just release our reference to it)
         */
        if (page_count(page) == 1 && !pipe->tmp_page)
                pipe->tmp_page = page;
        else
                put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        if (page_count(page) == 1) {
                if (memcg_kmem_enabled())
                        memcg_kmem_uncharge(page, 0);
                __SetPageLocked(page);
                return 0;
        }
        return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
                           struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * A reference of one is golden, that means that the owner of this
         * page is the only one holding a reference to it. lock the page
         * and return OK.
         */
        if (page_count(page) == 1) {
                lock_page(page);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
        get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
                             struct pipe_buffer *buf)
{
        return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = anon_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
        .can_merge = 0,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = anon_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
        size_t total_len = iov_iter_count(to);
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        int do_wakeup;
        ssize_t ret;

        /* Null read succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);
        for (;;) {
                int bufs = pipe->nrbufs;
                if (bufs) {
                        int curbuf = pipe->curbuf;
                        struct pipe_buffer *buf = pipe->bufs + curbuf;
                        size_t chars = buf->len;
                        size_t written;
                        int error;

                        if (chars > total_len)
                                chars = total_len;

                        error = pipe_buf_confirm(pipe, buf);
                        if (error) {
                                if (!ret)
                                        ret = error;
                                break;
                        }

                        written = copy_page_to_iter(buf->page, buf->offset, chars, to);
                        if (unlikely(written < chars)) {
                                if (!ret)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += chars;
                        buf->offset += chars;
                        buf->len -= chars;

                        /* Was it a packet buffer? Clean up and exit */
                        if (buf->flags & PIPE_BUF_FLAG_PACKET) {
                                total_len = chars;
                                buf->len = 0;
                        }

                        if (!buf->len) {
                                pipe_buf_release(pipe, buf);
                                curbuf = (curbuf + 1) & (pipe->buffers - 1);
                                pipe->curbuf = curbuf;
                                pipe->nrbufs = --bufs;
                                do_wakeup = 1;
                        }
                        total_len -= chars;
                        if (!total_len)
                                break;  /* common path: read succeeded */
                }
                if (bufs)       /* More to do? */
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        /* syscall merging: Usually we must not sleep
                         * if O_NONBLOCK is set, or if we got some data.
                         * But if a writer sleeps in kernel space, then
                         * we can wait for that data without violating POSIX.
                         */
                        if (ret)
                                break;
                        if (filp->f_flags & O_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                }
                pipe_wait(pipe);
        }
        __pipe_unlock(pipe);

        /* Signal writers asynchronously that there is more room. */
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        if (ret > 0)
                file_accessed(filp);
        return ret;
}

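/*
 * Illustrative note (not from the original source): the buffer array is a
 * ring whose size, pipe->buffers, is always a power of two (see
 * round_pipe_size() below), so advancing an index is a mask instead of a
 * modulo. E.g. with 16 buffers, curbuf 15 wraps as (15 + 1) & 15 == 0.
 */
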
static inline int is_packetized(struct file *file)
{
        return (file->f_flags & O_DIRECT) != 0;
}

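/*
 * Illustrative note (not from the original source): O_DIRECT here selects
 * "packet mode", as created by pipe2(fds, O_DIRECT) from userspace. Each
 * write becomes one PIPE_BUF_FLAG_PACKET buffer, and pipe_read() above
 * discards whatever part of a packet the reader did not consume.
 */
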
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        ssize_t ret = 0;
        int do_wakeup = 0;
        size_t total_len = iov_iter_count(from);
        ssize_t chars;

        /* Null write succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        __pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                ret = -EPIPE;
                goto out;
        }

        /* We try to merge small writes */
        chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
        if (pipe->nrbufs && chars != 0) {
                int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
                                                        (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + lastbuf;
                int offset = buf->offset + buf->len;

                if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
                        ret = pipe_buf_confirm(pipe, buf);
                        if (ret)
                                goto out;

                        ret = copy_page_from_iter(buf->page, offset, chars, from);
                        if (unlikely(ret < chars)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        do_wakeup = 1;
                        buf->len += ret;
                        if (!iov_iter_count(from))
                                goto out;
                }
        }

        for (;;) {
                int bufs;

                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }
                bufs = pipe->nrbufs;
                if (bufs < pipe->buffers) {
                        int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;
                        struct page *page = pipe->tmp_page;
                        int copied;

                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
                                if (unlikely(!page)) {
                                        ret = ret ? : -ENOMEM;
                                        break;
                                }
                                pipe->tmp_page = page;
                        }
                        /* Always wake up, even if the copy fails. Otherwise
                         * we lock up (O_NONBLOCK-)readers that sleep due to
                         * syscall merging.
                         * FIXME! Is this really true?
                         */
                        do_wakeup = 1;
                        copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
                        if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
                                if (!ret)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += copied;

                        /* Insert it into the buffer array */
                        buf->page = page;
                        buf->ops = &anon_pipe_buf_ops;
                        buf->offset = 0;
                        buf->len = copied;
                        buf->flags = 0;
                        if (is_packetized(filp)) {
                                buf->ops = &packet_pipe_buf_ops;
                                buf->flags = PIPE_BUF_FLAG_PACKET;
                        }
                        pipe->nrbufs = ++bufs;
                        pipe->tmp_page = NULL;

                        if (!iov_iter_count(from))
                                break;
                }
                if (bufs < pipe->buffers)
                        continue;
                if (filp->f_flags & O_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }
                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }
out:
        __pipe_unlock(pipe);
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
                int err = file_update_time(filp);
                if (err)
                        ret = err;
                sb_end_write(file_inode(filp)->i_sb);
        }
        return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int count, buf, nrbufs;

        switch (cmd) {
        case FIONREAD:
                __pipe_lock(pipe);
                count = 0;
                buf = pipe->curbuf;
                nrbufs = pipe->nrbufs;
                while (--nrbufs >= 0) {
                        count += pipe->bufs[buf].len;
                        buf = (buf+1) & (pipe->buffers - 1);
                }
                __pipe_unlock(pipe);

                return put_user(count, (int __user *)arg);
        default:
                return -ENOIOCTLCMD;
        }
}

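/*
 * Illustrative usage (not from the original source): FIONREAD reports how
 * many bytes are currently queued, summed across all filled buffers:
 *
 *        int n;
 *        if (ioctl(pipefd[0], FIONREAD, &n) == 0)
 *                printf("%d bytes buffered\n", n);
 */
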
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
        unsigned int mask;
        struct pipe_inode_info *pipe = filp->private_data;
        int nrbufs;

        poll_wait(filp, &pipe->wait, wait);

        /* Reading only -- no need for acquiring the semaphore. */
        nrbufs = pipe->nrbufs;
        mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
                        mask |= POLLHUP;
        }

        if (filp->f_mode & FMODE_WRITE) {
                mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
                /*
                 * Most Unices do not set POLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
                if (!pipe->readers)
                        mask |= POLLERR;
        }

        return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
        int kill = 0;

        spin_lock(&inode->i_lock);
        if (!--pipe->files) {
                inode->i_pipe = NULL;
                kill = 1;
        }
        spin_unlock(&inode->i_lock);

        if (kill)
                free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
                pipe->readers--;
        if (file->f_mode & FMODE_WRITE)
                pipe->writers--;

        if (pipe->readers || pipe->writers) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int retval = 0;

        __pipe_lock(pipe);
        if (filp->f_mode & FMODE_READ)
                retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
        if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
                retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
                if (retval < 0 && (filp->f_mode & FMODE_READ))
                        /* this can happen only if on == T */
                        fasync_helper(-1, filp, 0, &pipe->fasync_readers);
        }
        __pipe_unlock(pipe);
        return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                          unsigned long old, unsigned long new)
{
        return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
        return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
        return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard;
}

static bool is_unprivileged_user(void)
{
        return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

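/*
 * Illustrative note (not from the original source): both limits are
 * per-user page counts, and a limit of 0 disables the check. Assuming the
 * usual PIPE_DEF_BUFFERS of 16 and INR_OPEN_CUR of 1024, the soft default
 * works out to 16384 pages -- 64 MiB of pipe buffers with 4 KiB pages --
 * after which unprivileged users fall back to single-page pipes, while the
 * hard limit (once set) makes allocation fail outright.
 */
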
struct pipe_inode_info *alloc_pipe_info(void)
{
        struct pipe_inode_info *pipe;
        unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
        struct user_struct *user = get_current_user();
        unsigned long user_bufs;

        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
        if (pipe == NULL)
                goto out_free_uid;

        if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
                pipe_bufs = pipe_max_size >> PAGE_SHIFT;

        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

        if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
                user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
                pipe_bufs = 1;
        }

        if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
                goto out_revert_acct;

        pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
                             GFP_KERNEL_ACCOUNT);

        if (pipe->bufs) {
                init_waitqueue_head(&pipe->wait);
                pipe->r_counter = pipe->w_counter = 1;
                pipe->buffers = pipe_bufs;
                pipe->user = user;
                mutex_init(&pipe->mutex);
                return pipe;
        }

out_revert_acct:
        (void) account_pipe_buffers(user, pipe_bufs, 0);
        kfree(pipe);
out_free_uid:
        free_uid(user);
        return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
        int i;

        (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
        free_uid(pipe->user);
        for (i = 0; i < pipe->buffers; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
                if (buf->ops)
                        pipe_buf_release(pipe, buf);
        }
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
        kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
                                d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
        .d_dname        = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
        struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
        struct pipe_inode_info *pipe;

        if (!inode)
                goto fail_inode;

        inode->i_ino = get_next_ino();

        pipe = alloc_pipe_info();
        if (!pipe)
                goto fail_iput;

        inode->i_pipe = pipe;
        pipe->files = 2;
        pipe->readers = pipe->writers = 1;
        inode->i_fop = &pipefifo_fops;

        /*
         * Mark the inode dirty from the very beginning,
         * that way it will never be moved to the dirty
         * list because "mark_inode_dirty()" will think
         * that it already _is_ on the dirty list.
         */
        inode->i_state = I_DIRTY;
        inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

        return inode;

fail_iput:
        iput(inode);

fail_inode:
        return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
        int err;
        struct inode *inode = get_pipe_inode();
        struct file *f;
        struct path path;

        if (!inode)
                return -ENFILE;

        err = -ENOMEM;
        path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
        if (!path.dentry)
                goto err_inode;
        path.mnt = mntget(pipe_mnt);

        d_instantiate(path.dentry, inode);

        f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
        if (IS_ERR(f)) {
                err = PTR_ERR(f);
                goto err_dentry;
        }

        f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
        f->private_data = inode->i_pipe;

        res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
        if (IS_ERR(res[0])) {
                err = PTR_ERR(res[0]);
                goto err_file;
        }

        path_get(&path);
        res[0]->private_data = inode->i_pipe;
        res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
        res[1] = f;
        return 0;

err_file:
        put_filp(f);
err_dentry:
        free_pipe_info(inode->i_pipe);
        path_put(&path);
        return err;

err_inode:
        free_pipe_info(inode->i_pipe);
        iput(inode);
        return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
        int error;
        int fdw, fdr;

        if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
                return -EINVAL;

        error = create_pipe_files(files, flags);
        if (error)
                return error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_read_pipe;
        fdr = error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_fdr;
        fdw = error;

        audit_fd_pair(fdr, fdw);
        fd[0] = fdr;
        fd[1] = fdw;
        return 0;

err_fdr:
        put_unused_fd(fdr);
err_read_pipe:
        fput(files[0]);
        fput(files[1]);
        return error;
}

int do_pipe_flags(int *fd, int flags)
{
        struct file *files[2];
        int error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                fd_install(fd[0], files[0]);
                fd_install(fd[1], files[1]);
        }
        return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
        struct file *files[2];
        int fd[2];
        int error;

        error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
                        fput(files[0]);
                        fput(files[1]);
                        put_unused_fd(fd[0]);
                        put_unused_fd(fd[1]);
                        error = -EFAULT;
                } else {
                        fd_install(fd[0], files[0]);
                        fd_install(fd[1], files[1]);
                }
        }
        return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
        return sys_pipe2(fildes, 0);
}

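/*
 * Illustrative userspace view (not from the original source): the flags
 * accepted by __do_pipe_flags() above are the ones documented for
 * pipe2(2):
 *
 *        int fds[2];
 *        if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *                / * fds[0] is the read end, fds[1] the write end * /
 *        }
 */
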
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
        int cur = *cnt;

        while (cur == *cnt) {
                pipe_wait(pipe);
                if (signal_pending(current))
                        break;
        }
        return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
        wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
        struct pipe_inode_info *pipe;
        bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
        int ret;

        filp->f_version = 0;

        spin_lock(&inode->i_lock);
        if (inode->i_pipe) {
                pipe = inode->i_pipe;
                pipe->files++;
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                pipe = alloc_pipe_info();
                if (!pipe)
                        return -ENOMEM;
                pipe->files = 1;
                spin_lock(&inode->i_lock);
                if (unlikely(inode->i_pipe)) {
                        inode->i_pipe->files++;
                        spin_unlock(&inode->i_lock);
                        free_pipe_info(pipe);
                        pipe = inode->i_pipe;
                } else {
                        inode->i_pipe = pipe;
                        spin_unlock(&inode->i_lock);
                }
        }
        filp->private_data = pipe;
        /* OK, we have a pipe and it's pinned down */

        __pipe_lock(pipe);

        /* We can only do regular read/write on fifos */
        filp->f_mode &= (FMODE_READ | FMODE_WRITE);

        switch (filp->f_mode) {
        case FMODE_READ:
        /*
         *  O_RDONLY
         *  POSIX.1 says that O_NONBLOCK means return with the FIFO
         *  opened, even when there is no process writing the FIFO.
         */
                pipe->r_counter++;
                if (pipe->readers++ == 0)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->writers) {
                        if ((filp->f_flags & O_NONBLOCK)) {
                                /* suppress POLLHUP until we have
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
                                if (wait_for_partner(pipe, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
                break;

        case FMODE_WRITE:
        /*
         *  O_WRONLY
         *  POSIX.1 says that O_NONBLOCK means return -1 with
         *  errno=ENXIO when there is no process reading the FIFO.
         */
                ret = -ENXIO;
                if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
                        goto err;

                pipe->w_counter++;
                if (!pipe->writers++)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->readers) {
                        if (wait_for_partner(pipe, &pipe->r_counter))
                                goto err_wr;
                }
                break;

        case FMODE_READ | FMODE_WRITE:
        /*
         *  O_RDWR
         *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
         *  This implementation will NEVER block on a O_RDWR open, since
         *  the process can at least talk to itself.
         */

                pipe->readers++;
                pipe->writers++;
                pipe->r_counter++;
                pipe->w_counter++;
                if (pipe->readers == 1 || pipe->writers == 1)
                        wake_up_partner(pipe);
                break;

        default:
                ret = -EINVAL;
                goto err;
        }

        /* Ok! */
        __pipe_unlock(pipe);
        return 0;

err_rd:
        if (!--pipe->readers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err_wr:
        if (!--pipe->writers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err:
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return ret;
}

const struct file_operations pipefifo_fops = {
        .open           = fifo_open,
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
        .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
        unsigned long nr_pages;

        if (size < pipe_min_size)
                size = pipe_min_size;

        nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (nr_pages == 0)
                return 0;

        return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

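/*
 * Worked example (not from the original source), assuming 4 KiB pages:
 * round_pipe_size(100000) needs ceil(100000 / 4096) = 25 pages, which
 * rounds up to the next power of two, 32 pages, so the function returns
 * 32 << 12 = 131072 bytes.
 */
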
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
        struct pipe_buffer *bufs;
        unsigned int size, nr_pages;
        unsigned long user_bufs;
        long ret = 0;

        size = round_pipe_size(arg);
        if (size == 0)
                return -EINVAL;
        nr_pages = size >> PAGE_SHIFT;

        if (!nr_pages)
                return -EINVAL;

        /*
         * If trying to increase the pipe capacity, check that an
         * unprivileged user is not trying to exceed various limits
         * (soft limit check here, hard limit check just below).
         * Decreasing the pipe capacity is always permitted, even
         * if the user is currently over a limit.
         */
        if (nr_pages > pipe->buffers &&
                        size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
                return -EPERM;

        user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

        if (nr_pages > pipe->buffers &&
                        (too_many_pipe_buffers_hard(user_bufs) ||
                         too_many_pipe_buffers_soft(user_bufs)) &&
                        is_unprivileged_user()) {
                ret = -EPERM;
                goto out_revert_acct;
        }

        /*
         * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
         * expect a lot of shrink+grow operations, just free and allocate
         * again like we would do for growing. If the pipe currently
         * contains more buffers than arg, then return busy.
         */
        if (nr_pages < pipe->nrbufs) {
                ret = -EBUSY;
                goto out_revert_acct;
        }

        bufs = kcalloc(nr_pages, sizeof(*bufs),
                       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (unlikely(!bufs)) {
                ret = -ENOMEM;
                goto out_revert_acct;
        }

        /*
         * The pipe array wraps around, so just start the new one at zero
         * and adjust the indexes.
         */
        if (pipe->nrbufs) {
                unsigned int tail;
                unsigned int head;

                tail = pipe->curbuf + pipe->nrbufs;
                if (tail < pipe->buffers)
                        tail = 0;
                else
                        tail &= (pipe->buffers - 1);

                head = pipe->nrbufs - tail;
                if (head)
                        memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
                if (tail)
                        memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
        }

        pipe->curbuf = 0;
        kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->buffers = nr_pages;
        return nr_pages * PAGE_SIZE;

out_revert_acct:
        (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
        return ret;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dopipe_max_size
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
                 size_t *lenp, loff_t *ppos)
{
        return proc_dopipe_max_size(table, write, buf, lenp, ppos);
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
        return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe;
        long ret;

        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;

        __pipe_lock(pipe);

        switch (cmd) {
        case F_SETPIPE_SZ:
                ret = pipe_set_size(pipe, arg);
                break;
        case F_GETPIPE_SZ:
                ret = pipe->buffers * PAGE_SIZE;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        __pipe_unlock(pipe);
        return ret;
}

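/*
 * Illustrative usage (not from the original source): userspace reaches
 * this through fcntl(2). Both commands return the capacity in bytes, and
 * an F_SETPIPE_SZ request is rounded up by round_pipe_size(), so the
 * returned value may exceed what was asked for:
 *
 *        long sz = fcntl(pipefd[1], F_SETPIPE_SZ, 100000);
 *        / * sz == 131072 with 4 KiB pages; see the worked example above * /
 */
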
static const struct super_operations pipefs_ops = {
        .destroy_inode = free_inode_nonrcu,
        .statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
                        &pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
        .name           = "pipefs",
        .mount          = pipefs_mount,
        .kill_sb        = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
        int err = register_filesystem(&pipe_fs_type);

        if (!err) {
                pipe_mnt = kern_mount(&pipe_fs_type);
                if (IS_ERR(pipe_mnt)) {
                        err = PTR_ERR(pipe_mnt);
                        unregister_filesystem(&pipe_fs_type);
                }
        }
        return err;
}

fs_initcall(init_pipe_fs);