pipe: limit the per-user amount of pages allocated in pipes
index d2c45e14e6d8126e41bc6c463a509e68946e9fa4..c281867c453e601b2bf973f0d3dd4f5aaa8042aa 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -39,6 +39,12 @@ unsigned int pipe_max_size = 1048576;
  */
 unsigned int pipe_min_size = PAGE_SIZE;
 
+/* Maximum allocatable pages per user. Hard limit is unset by default, soft
+ * matches default values.
+ */
+unsigned long pipe_user_pages_hard;
+unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
+
 /*
  * We use a start+len construction, which provides full use of the 
  * allocated memory.
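
The two globals above are the knobs for the new per-user accounting: pipe_user_pages_soft defaults to PIPE_DEF_BUFFERS * INR_OPEN_CUR (16 * 1024 = 16384 pages, i.e. 64 MiB with 4 KiB pages), while pipe_user_pages_hard defaults to 0, meaning no hard cap. The sysctl registration is not part of fs/pipe.c; upstream it goes into kernel/sysctl.c and exposes the knobs as fs.pipe-user-pages-soft and fs.pipe-user-pages-hard. A minimal userspace check, assuming those proc entries are present on the running kernel:

#include <stdio.h>

static void show(const char *path)
{
	unsigned long val;
	FILE *f = fopen(path, "r");

	if (f && fscanf(f, "%lu", &val) == 1)
		printf("%s = %lu pages\n", path, val);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/proc/sys/fs/pipe-user-pages-soft");
	show("/proc/sys/fs/pipe-user-pages-hard");
	return 0;
}
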
@@ -117,25 +123,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
 }
 
 static int
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
-                       int atomic)
+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
+                       size_t *remaining, int atomic)
 {
        unsigned long copy;
 
-       while (len > 0) {
+       while (*remaining > 0) {
                while (!iov->iov_len)
                        iov++;
-               copy = min_t(unsigned long, len, iov->iov_len);
+               copy = min_t(unsigned long, *remaining, iov->iov_len);
 
                if (atomic) {
-                       if (__copy_from_user_inatomic(to, iov->iov_base, copy))
+                       if (__copy_from_user_inatomic(addr + *offset,
+                                                     iov->iov_base, copy))
                                return -EFAULT;
                } else {
-                       if (copy_from_user(to, iov->iov_base, copy))
+                       if (copy_from_user(addr + *offset,
+                                          iov->iov_base, copy))
                                return -EFAULT;
                }
-               to += copy;
-               len -= copy;
+               *offset += copy;
+               *remaining -= copy;
                iov->iov_base += copy;
                iov->iov_len -= copy;
        }
@@ -143,25 +151,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
 }
 
 static int
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
-                     int atomic)
+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
+                     size_t *remaining, int atomic)
 {
        unsigned long copy;
 
-       while (len > 0) {
+       while (*remaining > 0) {
                while (!iov->iov_len)
                        iov++;
-               copy = min_t(unsigned long, len, iov->iov_len);
+               copy = min_t(unsigned long, *remaining, iov->iov_len);
 
                if (atomic) {
-                       if (__copy_to_user_inatomic(iov->iov_base, from, copy))
+                       if (__copy_to_user_inatomic(iov->iov_base,
+                                                   addr + *offset, copy))
                                return -EFAULT;
                } else {
-                       if (copy_to_user(iov->iov_base, from, copy))
+                       if (copy_to_user(iov->iov_base,
+                                        addr + *offset, copy))
                                return -EFAULT;
                }
-               from += copy;
-               len -= copy;
+               *offset += copy;
+               *remaining -= copy;
                iov->iov_base += copy;
                iov->iov_len -= copy;
        }
@@ -395,8 +405,9 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                        struct pipe_buffer *buf = pipe->bufs + curbuf;
                        const struct pipe_buf_operations *ops = buf->ops;
                        void *addr;
-                       size_t chars = buf->len;
+                       size_t chars = buf->len, remaining;
                        int error, atomic;
+                       int offset;
 
                        if (chars > total_len)
                                chars = total_len;
@@ -409,9 +420,12 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                        }
 
                        atomic = !iov_fault_in_pages_write(iov, chars);
+                       remaining = chars;
+                       offset = buf->offset;
 redo:
                        addr = ops->map(pipe, buf, atomic);
-                       error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
+                       error = pipe_iov_copy_to_user(iov, addr, &offset,
+                                                     &remaining, atomic);
                        ops->unmap(pipe, buf, addr);
                        if (unlikely(error)) {
                                /*
@@ -531,6 +545,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
                if (ops->can_merge && offset + chars <= PAGE_SIZE) {
                        int error, atomic = 1;
                        void *addr;
+                       size_t remaining = chars;
 
                        error = ops->confirm(pipe, buf);
                        if (error)
@@ -539,8 +554,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
                        iov_fault_in_pages_read(iov, chars);
 redo1:
                        addr = ops->map(pipe, buf, atomic);
-                       error = pipe_iov_copy_from_user(offset + addr, iov,
-                                                       chars, atomic);
+                       error = pipe_iov_copy_from_user(addr, &offset, iov,
+                                                       &remaining, atomic);
                        ops->unmap(pipe, buf, addr);
                        ret = error;
                        do_wakeup = 1;
@@ -575,6 +590,8 @@ redo1:
                        struct page *page = pipe->tmp_page;
                        char *src;
                        int error, atomic = 1;
+                       int offset = 0;
+                       size_t remaining;
 
                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER);
@@ -595,14 +612,15 @@ redo1:
                                chars = total_len;
 
                        iov_fault_in_pages_read(iov, chars);
+                       remaining = chars;
 redo2:
                        if (atomic)
                                src = kmap_atomic(page);
                        else
                                src = kmap(page);
 
-                       error = pipe_iov_copy_from_user(src, iov, chars,
-                                                       atomic);
+                       error = pipe_iov_copy_from_user(src, &offset, iov,
+                                                       &remaining, atomic);
                        if (atomic)
                                kunmap_atomic(src);
                        else
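
The helper changes above also alter the retry semantics in pipe_read() and pipe_write(): the old helpers advanced a private pointer and length, so when an atomic copy faulted partway through, the non-atomic retry replayed the full original length against an iovec whose iov_base/iov_len had already been consumed. Passing offset and remaining by reference lets the redo paths resume exactly where the failed attempt stopped. A toy userspace model of that interface (not kernel code), with the "fault" simulated by handing the first call a shorter remaining count:

/* Resume-not-restart: offset/remaining survive across attempts, so a
 * second call copies only the untouched tail of the data. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static void copy_to_iov(struct iovec *iov, const char *addr,
			int *offset, size_t *remaining)
{
	while (*remaining > 0) {
		size_t copy = *remaining < iov->iov_len ?
			      *remaining : iov->iov_len;

		memcpy(iov->iov_base, addr + *offset, copy);
		*offset += copy;
		*remaining -= copy;
		iov->iov_base = (char *)iov->iov_base + copy;
		iov->iov_len -= copy;
	}
}

int main(void)
{
	char page[] = "0123456789abcdef";
	char out[17] = "";
	struct iovec iov = { .iov_base = out, .iov_len = 16 };
	int offset = 0;
	size_t remaining = 16;
	size_t first = 6;

	/* First attempt "faults" after 6 bytes ... */
	copy_to_iov(&iov, page, &offset, &first);
	remaining -= 6;

	/* ... and the retry starts at offset 6, copying only the rest. */
	copy_to_iov(&iov, page, &offset, &remaining);
	printf("%s\n", out);	/* prints 0123456789abcdef */
	return 0;
}
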
@@ -726,11 +744,25 @@ pipe_poll(struct file *filp, poll_table *wait)
        return mask;
 }
 
+static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
+{
+       int kill = 0;
+
+       spin_lock(&inode->i_lock);
+       if (!--pipe->files) {
+               inode->i_pipe = NULL;
+               kill = 1;
+       }
+       spin_unlock(&inode->i_lock);
+
+       if (kill)
+               free_pipe_info(pipe);
+}
+
 static int
 pipe_release(struct inode *inode, struct file *file)
 {
-       struct pipe_inode_info *pipe = inode->i_pipe;
-       int kill = 0;
+       struct pipe_inode_info *pipe = file->private_data;
 
        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
@@ -743,17 +775,9 @@ pipe_release(struct inode *inode, struct file *file)
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
-       spin_lock(&inode->i_lock);
-       if (!--pipe->files) {
-               inode->i_pipe = NULL;
-               kill = 1;
-       }
-       spin_unlock(&inode->i_lock);
        __pipe_unlock(pipe);
 
-       if (kill)
-               free_pipe_info(pipe);
-
+       put_pipe_info(inode, pipe);
        return 0;
 }
 
@@ -776,20 +800,49 @@ pipe_fasync(int fd, struct file *filp, int on)
        return retval;
 }
 
+static void account_pipe_buffers(struct pipe_inode_info *pipe,
+                                 unsigned long old, unsigned long new)
+{
+       atomic_long_add(new - old, &pipe->user->pipe_bufs);
+}
+
+static bool too_many_pipe_buffers_soft(struct user_struct *user)
+{
+       return pipe_user_pages_soft &&
+              atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+}
+
+static bool too_many_pipe_buffers_hard(struct user_struct *user)
+{
+       return pipe_user_pages_hard &&
+              atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+}
+
 struct pipe_inode_info *alloc_pipe_info(void)
 {
        struct pipe_inode_info *pipe;
 
        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
        if (pipe) {
-               pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
+               unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+               struct user_struct *user = get_current_user();
+
+               if (!too_many_pipe_buffers_hard(user)) {
+                       if (too_many_pipe_buffers_soft(user))
+                               pipe_bufs = 1;
+                       pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
+               }
+
                if (pipe->bufs) {
                        init_waitqueue_head(&pipe->wait);
                        pipe->r_counter = pipe->w_counter = 1;
-                       pipe->buffers = PIPE_DEF_BUFFERS;
+                       pipe->buffers = pipe_bufs;
+                       pipe->user = user;
+                       account_pipe_buffers(pipe, 0, pipe_bufs);
                        mutex_init(&pipe->mutex);
                        return pipe;
                }
+               free_uid(user);
                kfree(pipe);
        }
 
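alloc_pipe_info() now charges the buffer pages to the opening user: get_current_user() pins the user_struct, account_pipe_buffers(pipe, 0, pipe_bufs) adds the pages to user->pipe_bufs, and free_pipe_info() below undoes both. A user already over the hard limit gets no buffers at all (the allocation fails), and one over the soft limit falls back to a single buffer. From userspace the fallback is visible through F_GETPIPE_SZ; the sketch below assumes 4 KiB pages and the default sysctl values, so a fresh pipe normally reports 65536 bytes and a soft-limited one 4096.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}
	printf("pipe capacity: %d bytes\n", fcntl(fds[0], F_GETPIPE_SZ));
	close(fds[0]);
	close(fds[1]);
	return 0;
}
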
@@ -800,6 +853,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 {
        int i;
 
+       account_pipe_buffers(pipe, pipe->buffers, 0);
+       free_uid(pipe->user);
        for (i = 0; i < pipe->buffers; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
                if (buf->ops)
@@ -1014,7 +1069,6 @@ static int fifo_open(struct inode *inode, struct file *filp)
 {
        struct pipe_inode_info *pipe;
        bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
-       int kill = 0;
        int ret;
 
        filp->f_version = 0;
@@ -1130,15 +1184,9 @@ err_wr:
        goto err;
 
 err:
-       spin_lock(&inode->i_lock);
-       if (!--pipe->files) {
-               inode->i_pipe = NULL;
-               kill = 1;
-       }
-       spin_unlock(&inode->i_lock);
        __pipe_unlock(pipe);
-       if (kill)
-               free_pipe_info(pipe);
+
+       put_pipe_info(inode, pipe);
        return ret;
 }
 
@@ -1197,6 +1245,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
                        memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
        }
 
+       account_pipe_buffers(pipe, pipe->buffers, nr_pages);
        pipe->curbuf = 0;
        kfree(pipe->bufs);
        pipe->bufs = bufs;
@@ -1268,6 +1317,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
                if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
                        ret = -EPERM;
                        goto out;
+               } else if ((too_many_pipe_buffers_hard(pipe->user) ||
+                           too_many_pipe_buffers_soft(pipe->user)) &&
+                          !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+                       ret = -EPERM;
+                       goto out;
                }
                ret = pipe_set_size(pipe, nr_pages);
                break;
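
The F_SETPIPE_SZ path now also refuses to grow a pipe for a user who is over either limit unless the caller has CAP_SYS_RESOURCE or CAP_SYS_ADMIN; like the existing pipe_max_size check, the refusal surfaces as EPERM. A userspace sketch of how that failure shows up (1 MiB is just an arbitrary example size):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}
	if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0 && errno == EPERM)
		fprintf(stderr, "F_SETPIPE_SZ: EPERM (over a pipe limit?)\n");
	else
		printf("new capacity: %d bytes\n", fcntl(fds[1], F_GETPIPE_SZ));
	close(fds[0]);
	close(fds[1]);
	return 0;
}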