/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

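/*
 * Data lives in a circular ring of pipe_buffers: pipe->curbuf indexes the
 * first occupied buffer, pipe->nrbufs counts how many are in use, and
 * pipe->buffers (always a power of two) is the ring size, so indexes wrap
 * with a cheap "& (pipe->buffers - 1)" mask.
 */
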
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

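/*
 * Lock two pipes in address order, so that concurrent callers (e.g. tee())
 * always take the locks in the same order and cannot ABBA-deadlock.
 */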
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

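	/*
	 * Register on the waitqueue before dropping the pipe lock: a wakeup
	 * racing with the unlock then just makes the task runnable again,
	 * so schedule() returns instead of the event being lost.
	 */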
	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

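/*
 * Copy helpers for the read/write paths below. They walk the iovec,
 * advancing *offset and *remaining as they go, so that a failed atomic
 * (kmap_atomic) copy can be retried with the sleeping variant without
 * redoing the part that already succeeded.
 */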
static int
pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
			size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(addr + *offset,
						      iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(addr + *offset,
					   iov->iov_base, copy))
				return -EFAULT;
		}
		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
		      size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base,
						    addr + *offset, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base,
					 addr + *offset, copy))
				return -EFAULT;
		}
		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer that should be mapped
 * @atomic: whether to use an atomic map
 *
 * Description:
 * This function returns a kernel virtual address mapping for the
 * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 * and the caller has to be careful not to fault before calling
 * the unmap function.
 *
 * Note that this function calls kmap_atomic() if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page);
	}

	return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer that should be unmapped
 * @map_data: the data that the mapping function returned
 *
 * Description:
 * This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data);
	} else
		kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 * This function attempts to steal the &struct page attached to
 * @buf. If successful, this function returns 0 and returns with
 * the page locked. The caller may then reuse the page for whatever
 * he wishes; the typical use is insertion into a different file
 * page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 * This function grabs an extra reference to @buf. It's used in
 * the tee() system call, when we duplicate the buffers in one
 * pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 *
 * Description:
 * This function does nothing, because the generic pipe code uses
 * pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 * This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

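/*
 * The packet variant above differs from anon_pipe_buf_ops only in
 * .can_merge: packetized (O_DIRECT) pipes never append new data to an
 * existing buffer, so write boundaries are preserved for the reader.
 */
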
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	  unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len, remaining;
			int error, atomic;
			int offset;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
			remaining = chars;
			offset = buf->offset;
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr, &offset,
						      &remaining, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break; /* common path: read succeeded */
		}
		if (bufs) /* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
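	/*
	 * A write that isn't a whole number of pages ends in a partial
	 * page; "chars" is that remainder. If the last buffer in the ring
	 * has room left on its page (and its ops allow merging), append
	 * there instead of claiming a fresh page.
	 */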
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;
			size_t remaining = chars;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(addr, &offset, iov,
							&remaining, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;
			int offset = 0;
			size_t remaining;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
			remaining = chars;
redo2:
			if (atomic)
				src = kmap_atomic(page);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, &offset, iov,
							&remaining, atomic);
			if (atomic)
				kunmap_atomic(src);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
	}
	return ret;
}

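/*
 * FIONREAD reports how many bytes are ready to be read: walk the ring
 * and sum the length of every occupied buffer.
 */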
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static void account_pipe_buffers(struct pipe_inode_info *pipe,
				 unsigned long old, unsigned long new)
{
	atomic_long_add(new - old, &pipe->user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(struct user_struct *user)
{
	return pipe_user_pages_soft &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(struct user_struct *user)
{
	return pipe_user_pages_hard &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
}

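/*
 * A new pipe starts with PIPE_DEF_BUFFERS buffers; a user already over the
 * soft page limit is dropped to a single buffer, and one over the hard
 * limit gets no pipe at all.
 */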
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
		struct user_struct *user = get_current_user();

		if (!too_many_pipe_buffers_hard(user)) {
			if (too_many_pipe_buffers_soft(user))
				pipe_bufs = 1;
			pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
		}

		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->buffers = pipe_bufs;
			pipe->user = user;
			account_pipe_buffers(pipe, 0, pipe_bufs);
			mutex_init(&pipe->mutex);
			return pipe;
		}
		free_uid(user);
		kfree(pipe);
	}

	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	account_pipe_buffers(pipe, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f))
		goto err_dentry;

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0]))
		goto err_file;

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

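/*
 * Opening a FIFO: attach to an existing pipe_inode_info if the inode
 * already has one, otherwise allocate and install it under i_lock,
 * rechecking for a racing open that got there first.
 */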
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
		/*
		 * O_RDONLY
		 * POSIX.1 says that O_NONBLOCK means return with the FIFO
		 * opened, even when there is no process writing the FIFO.
		 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
		/*
		 * O_WRONLY
		 * POSIX.1 says that O_NONBLOCK means return -1 with
		 * errno=ENXIO when there is no process reading the FIFO.
		 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
		/*
		 * O_RDWR
		 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
		 * This implementation will NEVER block on a O_RDWR open, since
		 * the process can at least talk to itself.
		 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or a negative error code on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

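		/*
		 * "tail" is the number of buffers that wrapped past the end
		 * of the old array (0 if none did); "head" is the rest,
		 * contiguous from curbuf. Copy each run with its own memcpy
		 * so the new array starts unwrapped at index 0.
		 */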
		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
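/*
 * For example, with 4 KiB pages a request of 4097 bytes spans two pages,
 * already a power of two, and rounds to 8192; a five-page request would
 * round up to eight pages (32768 bytes).
 */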
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

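/*
 * Back-end for the F_SETPIPE_SZ and F_GETPIPE_SZ fcntl(2) commands, e.g.
 * fcntl(fd, F_SETPIPE_SZ, 1048576) to grow a pipe to 1 MiB. A successful
 * resize returns the actual (rounded-up) pipe size.
 */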
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
			    too_many_pipe_buffers_soft(pipe->user)) &&
			   !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
	}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
				   int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);