// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the defaults below (PIPE_DEF_BUFFERS pages for
 * each of INR_OPEN_CUR files).
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

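/*
 * Lock two pipes in a consistent (address) order, so that two tasks that
 * need the same pair of locks cannot deadlock against each other.
 */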
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

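/*
 * Take ownership of an anonymous pipe buffer's page. Returns 0 and leaves
 * the page locked (with any memcg kmem charge dropped) if we hold the only
 * reference, non-zero if the page is still shared and cannot be stolen.
 */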
static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

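/*
 * Callers (e.g. the splice code) use this to make sure that later pipe
 * writes cannot be merged into a buffer whose page they no longer own
 * exclusively.
 */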
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

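/*
 * O_DIRECT on a pipe selects "packet" mode (see pipe2(2)): each write is
 * stored in its own buffer, and a read never returns data from more than
 * one packet.
 */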
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore.  */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

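/*
 * Charge (or uncharge) 'new - old' pages to the user's pipe quota and
 * return the user's new total, which the callers below compare against
 * the pipe_user_pages_{soft,hard} sysctl limits.
 */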
static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

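/*
 * Sleep until the open counter passed in @cnt changes, i.e. until the
 * other end of the FIFO has been opened, or until a signal arrives.
 */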
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * new pipe size in bytes if successful, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

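/*
 * Back end for the F_SETPIPE_SZ and F_GETPIPE_SZ fcntl() commands:
 * userspace resizes a pipe with fcntl(fd, F_SETPIPE_SZ, size) and reads
 * the (rounded-up) capacity back with fcntl(fd, F_GETPIPE_SZ).
 */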
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);