// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

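/*
 * Take both pipe locks in a fixed (address-based) order so that two
 * tasks locking the same pair can never deadlock against each other.
 */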
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true with the page
 *	locked. The caller may then reuse the page for whatever it
 *	wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

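/*
 * Copy data from the pipe ring into the caller's iov_iter, releasing
 * each buffer once it has been fully consumed, and sleep (exclusively,
 * on rd_wait) when the pipe is empty but writers remain.
 */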
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

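/*
 * O_DIRECT on a pipe selects "packet mode" (see pipe2(2)): each write
 * becomes a discrete packet, and a read returns at most one packet's
 * worth of data (the PIPE_BUF_FLAG_PACKET handling above).
 */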
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

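/*
 * Append data from the caller's iov_iter to the pipe ring: first try to
 * merge a sub-page tail into the most recently written buffer, then fill
 * freshly allocated pages one ring slot at a time, sleeping on wr_wait
 * whenever the ring is full.
 */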
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

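/*
 * FIONREAD reports the number of unread bytes queued in the ring; the
 * watch-queue ioctls configure a notification pipe's size and filter.
 */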
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

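/*
 * Drop one reference on inode->i_pipe (pipe->files counts the struct
 * files attached to the pipe, protected by inode->i_lock) and free the
 * pipe once the last reference is gone.
 */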
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

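/*
 * Per-user accounting of pipe pages: alloc_pipe_info() and F_SETPIPE_SZ
 * charge pages to user->pipe_bufs and compare the result against the
 * pipe_user_pages_soft/hard sysctl limits below.
 */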
unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

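/*
 * Create the two struct files for a new pipe: res[1] is the write end
 * (O_WRONLY) and res[0] the read end (O_RDONLY), both backed by the
 * same pipefs inode.
 */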
int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

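/*
 * A few illustrative values, assuming PAGE_SIZE == 4096:
 * round_pipe_size(1) == 4096, round_pipe_size(5000) == 8192,
 * round_pipe_size(1 << 20) == 1 << 20, and anything above 2^31 yields 0.
 */
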
/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);