/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

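/*
 * Illustrative sketch (not part of this file): userspace can query the
 * current limit straight from procfs. A hedged example, assuming a
 * standard glibc environment:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int max;
 *		FILE *f = fopen("/proc/sys/fs/pipe-max-size", "r");
 *
 *		if (f && fscanf(f, "%u", &max) == 1)
 *			printf("pipe-max-size: %u bytes\n", max);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */
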
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static int
pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
			size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(addr + *offset,
						      iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(addr + *offset,
					   iov->iov_base, copy))
				return -EFAULT;
		}

		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
		      size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base,
						    addr + *offset, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base,
					 addr + *offset, copy))
				return -EFAULT;
		}

		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

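/*
 * These helpers walk a scatter/gather list the same way userspace builds
 * one for writev(2)/readv(2), skipping zero-length segments. A minimal
 * userspace counterpart, for illustration only (assumes POSIX):
 *
 *	#include <string.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char hdr[] = "header ", body[] = "body\n";
 *		struct iovec iov[2] = {
 *			{ .iov_base = hdr,  .iov_len = strlen(hdr)  },
 *			{ .iov_base = body, .iov_len = strlen(body) },
 *		};
 *
 *		// One syscall gathers both segments into the pipe/file.
 *		return writev(STDOUT_FILENO, iov, 2) < 0;
 *	}
 */
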
/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be mapped
 * @atomic:	whether to use an atomic map
 *
 * Description:
 *	This function returns a kernel virtual address mapping for the
 *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 *	and the caller has to be careful not to fault before calling
 *	the unmap function.
 *
 *	Note that this function calls kmap_atomic() if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page);
	}

	return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be unmapped
 * @map_data:	the data that the mapping function returned
 *
 * Description:
 *	This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data);
	} else
		kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	  unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len, remaining;
			int error, atomic;
			int offset;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
			remaining = chars;
			offset = buf->offset;
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr, &offset,
						      &remaining, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

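/*
 * O_DIRECT on a pipe selects "packet mode". A hedged userspace sketch
 * (assumes Linux 3.4+ and _GNU_SOURCE for O_DIRECT with pipe2):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		char buf[64];
 *
 *		if (pipe2(fds, O_DIRECT) < 0)
 *			return 1;
 *		write(fds[1], "aa", 2);	// first packet
 *		write(fds[1], "bb", 2);	// second packet
 *		// Each read returns at most one packet: this prints 2, not 4.
 *		printf("%zd\n", read(fds[0], buf, sizeof(buf)));
 *		return 0;
 *	}
 */
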
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
						(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;
			size_t remaining = chars;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(addr, &offset, iov,
							&remaining, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;
			int offset = 0;
			size_t remaining;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
			remaining = chars;
redo2:
			if (atomic)
				src = kmap_atomic(page);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, &offset, iov,
							&remaining, atomic);
			if (atomic)
				kunmap_atomic(src);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}

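/*
 * Illustrative userspace use of the FIONREAD ioctl handled above
 * (assumes a POSIX environment; not part of this file):
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2], n = 0;
 *
 *		pipe(fds);
 *		write(fds[1], "hello", 5);
 *		ioctl(fds[0], FIONREAD, &n);
 *		printf("%d bytes buffered\n", n);	// prints 5
 *		return 0;
 *	}
 */
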
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

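/*
 * A minimal poll(2) consumer, for illustration only (assumes POSIX):
 *
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		struct pollfd pfd;
 *
 *		pipe(fds);
 *		write(fds[1], "x", 1);
 *		pfd.fd = fds[0];
 *		pfd.events = POLLIN;
 *		// Returns at once: one byte is buffered, so POLLIN is set.
 *		if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *			puts("readable");
 *		return 0;
 *	}
 */
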
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static void account_pipe_buffers(struct pipe_inode_info *pipe,
                                 unsigned long old, unsigned long new)
{
	atomic_long_add(new - old, &pipe->user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(struct user_struct *user)
{
	return pipe_user_pages_soft &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(struct user_struct *user)
{
	return pipe_user_pages_hard &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
}

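/*
 * Worked example of the default soft cap, assuming PIPE_DEF_BUFFERS == 16
 * and INR_OPEN_CUR == 1024: pipe_user_pages_soft = 16 * 1024 = 16384 pages,
 * i.e. 64 MiB of pipe buffers per user with 4 KiB pages. Once a user is
 * over that, new pipes fall back to a single page (see alloc_pipe_info()
 * below); only the hard cap, when set, makes allocation fail outright.
 */
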
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
		struct user_struct *user = get_current_user();

		if (!too_many_pipe_buffers_hard(user)) {
			if (too_many_pipe_buffers_soft(user))
				pipe_bufs = 1;
			pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
		}

		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->buffers = pipe_bufs;
			pipe->user = user;
			account_pipe_buffers(pipe, 0, pipe_bufs);
			mutex_init(&pipe->mutex);
			return pipe;
		}

		free_uid(user);
		kfree(pipe);
	}

	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	account_pipe_buffers(pipe, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (!f)
		goto err_dentry;

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (!res[0])
		goto err_file;

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}

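/*
 * The matching userspace entry point, for illustration (hedged; assumes
 * glibc and _GNU_SOURCE for pipe2):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *
 *		// Like pipe(fds), but close-on-exec and non-blocking.
 *		if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *			return 1;
 *		write(fds[1], "hi", 2);
 *		close(fds[1]);
 *		close(fds[0]);
 *		return 0;
 *	}
 */
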
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

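/*
 * The O_NONBLOCK rules above are visible from userspace. A hedged sketch
 * (assumes /tmp is writable and nothing else touches the FIFO):
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int rfd, wfd;
 *
 *		mkfifo("/tmp/demo.fifo", 0600);
 *		// Succeeds at once even with no writer (POSIX O_RDONLY rule).
 *		rfd = open("/tmp/demo.fifo", O_RDONLY | O_NONBLOCK);
 *		// Without the reader above this would fail with ENXIO
 *		// (POSIX O_WRONLY rule).
 *		wfd = open("/tmp/demo.fifo", O_WRONLY | O_NONBLOCK);
 *		close(wfd);
 *		close(rfd);
 *		unlink("/tmp/demo.fifo");
 *		return 0;
 *	}
 */
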
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf,
			       head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs,
			       tail * sizeof(struct pipe_buffer));
	}

	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}

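/*
 * Worked example of the linearising copy above, assuming pipe->buffers == 8,
 * pipe->curbuf == 6 and pipe->nrbufs == 4: tail = 6 + 4 = 10 wraps to
 * 10 & 7 = 2, so head = 4 - 2 = 2. The two memcpy() calls move bufs[6..7]
 * to the front of the new array and bufs[0..1] right behind them, and the
 * ring restarts at curbuf = 0.
 */
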
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

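/*
 * Example, assuming 4 KiB pages (PAGE_SHIFT == 12): round_pipe_size(100000)
 * computes nr_pages = (100000 + 4095) >> 12 = 25, rounds that up to the
 * next power of two (32) and returns 32 << 12 = 131072 bytes.
 */
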
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);

	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
			    too_many_pipe_buffers_soft(pipe->user)) &&
			   !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
		}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	__pipe_unlock(pipe);
	return ret;
}

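/*
 * Hedged userspace sketch of the fcntl interface handled above (assumes
 * _GNU_SOURCE for F_SETPIPE_SZ/F_GETPIPE_SZ, available since Linux 2.6.35):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *
 *		pipe(fds);
 *		// Request 1 MiB; the kernel rounds to a power-of-two page
 *		// count and returns the capacity actually set.
 *		printf("set: %d\n", fcntl(fds[1], F_SETPIPE_SZ, 1048576));
 *		printf("get: %d\n", fcntl(fds[1], F_GETPIPE_SZ));
 *		return 0;
 *	}
 */
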
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);