// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2
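
/*
 * Illustrative userspace sketch (not part of this file): a pipe used as a
 * counting semaphore, the way the jobserver does. Each one-byte write()
 * releases a token and each one-byte read() acquires one; with fewer than
 * two buffers a writer returning a token could block even though the pipe
 * is not full.
 *
 *	char token;
 *	read(fds[0], &token, 1);	// acquire a job slot
 *	// ... run the job ...
 *	write(fds[1], &token, 1);	// release the job slot
 */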

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
static unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
static unsigned long pipe_user_pages_hard;
static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
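
/*
 * Worked example (illustrative numbers): with ring_size = 8 the mask is 7.
 * If tail = 0xfffffffe and head = 0x00000001, unsigned arithmetic gives an
 * occupancy of head - tail = 3 even though head has wrapped past zero; the
 * slots in use are indexed tail & mask = 6, then 7, then 0.
 */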

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
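
/*
 * Minimal usage sketch (assumed caller, not from this file): a tee-like
 * operation locking both ends before duplicating buffers:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	// ... copy buffer references from ipipe to opipe ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 *
 * Taking the two mutexes in address order gives every caller the same
 * global ordering, so concurrent callers cannot ABBA-deadlock.
 */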

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
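
/*
 * Userspace sketch (illustrative, not from this file): O_DIRECT selects
 * "packet mode", where each write() becomes one packet and each read()
 * returns at most one packet:
 *
 *	int fds[2];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);			// packet 1
 *	write(fds[1], "cd", 2);			// packet 2
 *	char buf[16];
 *	read(fds[0], buf, sizeof(buf));		// returns 2 bytes: "ab"
 */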

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
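	/*
	 * Worked example (illustrative numbers): after a 100-byte write the
	 * head buffer has offset 0, len 100 and PIPE_BUF_FLAG_CAN_MERGE set.
	 * A second 100-byte write copies into the same page at offset 100
	 * instead of taking a new ring slot, since 100 + 100 <= PAGE_SIZE.
	 */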
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
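
/*
 * Userspace sketch (illustrative, not from this file): FIONREAD reports the
 * number of unread bytes currently buffered in the pipe:
 *
 *	int unread;
 *	if (ioctl(fds[0], FIONREAD, &unread) == 0)
 *		printf("%d bytes ready\n", unread);
 */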

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on is true */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	/* pipe groks IOCB_NOWAIT */
	files[0]->f_mode |= FMODE_NOWAIT;
	files[1]->f_mode |= FMODE_NOWAIT;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
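
/*
 * Userspace sketch (illustrative, not from this file): fildes receives the
 * read end in fd[0] and the write end in fd[1]:
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC) == 0) {
 *		char buf[2];
 *		write(fds[1], "hi", 2);
 *		read(fds[0], buf, 2);
 *	}
 */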

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
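
/*
 * For example, with 4 KiB pages: round_pipe_size(100000) returns 131072
 * (the next power of two), round_pipe_size(1) returns PAGE_SIZE, and any
 * request above 2^31 returns 0 so the caller can reject it.
 */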

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * new pipe size if successful, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
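
/*
 * Userspace sketch (illustrative, not from this file): resizing a pipe and
 * reading back the capacity actually granted:
 *
 *	long size = fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);
 *	long cur  = fcntl(fds[1], F_GETPIPE_SZ);
 *
 * The kernel rounds the request up to a power-of-2 page count, so 'size'
 * may exceed what was asked for.
 */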

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

#ifdef CONFIG_SYSCTL
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	if (write) {
		unsigned int val;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		*valp = val;
	} else {
		unsigned int val = *valp;
		*lvalp = (unsigned long) val;
	}

	return 0;
}

static int proc_dopipe_max_size(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, NULL);
}

static struct ctl_table fs_pipe_sysctls[] = {
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",
		.data		= &pipe_user_pages_hard,
		.maxlen		= sizeof(pipe_user_pages_hard),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "pipe-user-pages-soft",
		.data		= &pipe_user_pages_soft,
		.maxlen		= sizeof(pipe_user_pages_soft),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{ }
};
#endif

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
#ifdef CONFIG_SYSCTL
	register_sysctl_init("fs", fs_pipe_sysctls);
#endif
	return err;
}

fs_initcall(init_pipe_fs);