/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
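/*
 * With the usual defaults (PIPE_DEF_BUFFERS == 16 and INR_OPEN_CUR == 1024,
 * both assumed here) the soft limit below works out to 16384 pages, i.e.
 * 64 MiB of 4 KiB pages per unprivileged user.
 */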
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

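/*
 * Lock two pipes at once, ordering the locks by the pipes' kernel addresses
 * so that concurrent callers (e.g. tee() copying between two pipes) always
 * take them in the same order and cannot deadlock against each other.
 */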
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
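		/*
		 * We hold the only reference, so the page can be handed to a
		 * new owner (e.g. spliced into the page cache). Give back the
		 * kmem charge taken when the page was allocated with
		 * __GFP_ACCOUNT in pipe_write().
		 */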
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

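/*
 * Two flavours of buffer operations for locally created pipes: ordinary
 * anonymous buffers allow a small write to be merged into the last buffer
 * (can_merge), while packetized buffers, used for pipes opened with
 * O_DIRECT, never merge so that each write() is read back as a discrete
 * packet.
 */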
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
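				/*
				 * pipe->buffers is always a power of two, so
				 * the mask below wraps curbuf around the ring
				 * of buffers.
				 */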
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

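/*
 * Adjust the user's count of pipe buffer pages by (new - old) and return
 * the resulting total, which the callers below compare against the soft
 * and hard limits. Passing new < old gives pages back.
 */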
static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = pipe_max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs)) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs))
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

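/*
 * Allocate the struct file pair for a new pipe: on success res[0] is the
 * read end and res[1] the write end, both backed by the same pipefs inode.
 */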
int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f)) {
		err = PTR_ERR(f);
		goto err_dentry;
	}

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0])) {
		err = PTR_ERR(res[0]);
		goto err_file;
	}

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}

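/*
 * Sleep until the open counter we are watching changes, i.e. until a
 * partner opens the other end of the FIFO, or until a signal arrives.
 */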
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on a O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open = fifo_open,
	.llseek = no_llseek,
	.read_iter = pipe_read,
	.write_iter = pipe_write,
	.poll = pipe_poll,
	.unlocked_ioctl = pipe_ioctl,
	.release = pipe_release,
	.fasync = pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
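/*
 * For example, with 4 KiB pages a request of 100000 bytes spans 25 pages,
 * which rounds up to 32, so round_pipe_size() returns 131072.
 */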
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * new pipe size on success, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

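		/*
		 * head counts the occupied buffers from curbuf to the end of
		 * the old array; tail counts the ones that wrapped around to
		 * its start. Copy the head first so the new ring begins at
		 * index 0.
		 */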
		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

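/*
 * Back end for the fcntl() pipe commands: F_SETPIPE_SZ resizes the ring of
 * buffers and F_GETPIPE_SZ reports its current capacity in bytes.
 */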
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name = "pipefs",
	.mount = pipefs_mount,
	.kill_sb = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);