fs/splice.c
/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include "internal.h"

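/*
 * Illustrative userspace sketch (not part of this file, not compiled):
 * the "extended pipe" idea from the header comment above. The pipe acts
 * as the in-kernel buffer, and splice() moves data file -> pipe ->
 * stdout without it ever passing through a userspace buffer. The file
 * name and chunk size are arbitrary assumptions.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int fd = open("input.dat", O_RDONLY);
	ssize_t n;

	if (fd < 0 || pipe(pfd) < 0)
		return 1;
	/* file -> pipe, then drain pipe -> stdout */
	while ((n = splice(fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE)) > 0)
		if (splice(pfd[0], NULL, STDOUT_FILENO, NULL, n,
			   SPLICE_F_MOVE) < 0)
			return 1;
	return n < 0;
}
#endif
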
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return 1;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Check whether the contents of buf is OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}

/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe: pipe to fill
 * @spd: data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	pipe_lock(pipe);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->files)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < pipe->buffers)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);

	if (do_wakeup)
		wakeup_pipe_readers(pipe);

	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}

void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
	page_cache_release(spd->pages[i]);
}

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
 */
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
	unsigned int buffers = ACCESS_ONCE(pipe->buffers);

	spd->nr_pages_max = buffers;
	if (buffers <= PIPE_DEF_BUFFERS)
		return 0;

	spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
	spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);

	if (spd->pages && spd->partial)
		return 0;

	kfree(spd->pages);
	kfree(spd->partial);
	return -ENOMEM;
}

void splice_shrink_spd(struct splice_pipe_desc *spd)
{
	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
		return;

	kfree(spd->pages);
	kfree(spd->partial);
}

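/*
 * Illustrative sketch (hypothetical in-kernel caller, not compiled):
 * how the splice_pipe_desc helpers above fit together when pushing a
 * single page-cache page into a pipe. The extra page reference taken
 * here is handed over to the pipe and dropped by the buffer's
 * ->release() (or by spd_release_page() if the page is never consumed).
 */
#if 0
static ssize_t push_one_page(struct pipe_inode_info *pipe,
			     struct page *page,
			     unsigned int offset, unsigned int len)
{
	struct page *pages[1] = { page };
	struct partial_page partial[1] = {
		{ .offset = offset, .len = len },
	};
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.nr_pages	= 1,
		.nr_pages_max	= 1,
		.ops		= &page_cache_pipe_buf_ops,
		.spd_release	= spd_release_page,
	};

	page_cache_get(page);	/* reference owned by the pipe from here on */
	return splice_to_pipe(pipe, &spd);
}
#endif
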
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	/*
	 * Look up the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
	index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
	if (spd.nr_pages < nr_pages)
		page_cache_sync_readahead(mapping, &in->f_ra, in,
				index, req_pages - spd.nr_pages);

	error = 0;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (PageReadahead(page))
			page_cache_async_readahead(mapping, &in->f_ra, in,
					page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * Page was truncated, or invalidated by the
			 * filesystem. Redo the find/create, but this time the
			 * page is kept locked, so there's no chance of another
			 * race with truncate/invalidate.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

				if (!page) {
					error = -ENOMEM;
					break;
				}
				page_cache_release(spd.pages[page_nr]);
				spd.pages[page_nr] = page;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * lets just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);
	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implement
 *    a readpage() hook.
 *
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0) {
		*ppos += ret;
		file_accessed(in);
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);

static const struct pipe_buf_operations default_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_nosteal,
	.get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);

static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
			    unsigned long vlen, loff_t offset)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
	set_fs(old_fs);

	return res;
}

ssize_t kernel_write(struct file *file, const char *buf, size_t count,
		     loff_t pos)
{
	mm_segment_t old_fs;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_write(file, (__force const char __user *)buf, count, &pos);
	set_fs(old_fs);

	return res;
}
EXPORT_SYMBOL(kernel_write);

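/*
 * Illustrative sketch (hypothetical caller, not compiled): kernel_write()
 * above lets kernel code hand a kernel buffer to ->write() paths that
 * expect user pointers, via the temporary set_fs(get_ds()) switch.
 */
#if 0
static ssize_t write_banner(struct file *filp)
{
	static const char msg[] = "hello from the kernel\n";

	return kernel_write(filp, msg, sizeof(msg) - 1, 0);
}
#endif
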
ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	unsigned int nr_pages;
	unsigned int nr_freed;
	size_t offset;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
	ssize_t res;
	size_t this_len;
	int error;
	int i;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &default_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	res = -ENOMEM;
	vec = __vec;
	if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
		vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
		if (!vec)
			goto shrink_ret;
	}

	offset = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
		struct page *page;

		page = alloc_page(GFP_USER);
		error = -ENOMEM;
		if (!page)
			goto err;

		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
		vec[i].iov_base = (void __user *) page_address(page);
		vec[i].iov_len = this_len;
		spd.pages[i] = page;
		spd.nr_pages++;
		len -= this_len;
		offset = 0;
	}

	res = kernel_readv(in, vec, spd.nr_pages, *ppos);
	if (res < 0) {
		error = res;
		goto err;
	}

	error = 0;
	if (!res)
		goto err;

	nr_freed = 0;
	for (i = 0; i < spd.nr_pages; i++) {
		this_len = min_t(size_t, vec[i].iov_len, res);
		spd.partial[i].offset = 0;
		spd.partial[i].len = this_len;
		if (!this_len) {
			__free_page(spd.pages[i]);
			spd.pages[i] = NULL;
			nr_freed++;
		}
		res -= this_len;
	}
	spd.nr_pages -= nr_freed;

	res = splice_to_pipe(pipe, &spd);
	if (res > 0)
		*ppos += res;

shrink_ret:
	if (vec != __vec)
		kfree(vec);
	splice_shrink_spd(&spd);
	return res;

err:
	for (i = 0; i < spd.nr_pages; i++)
		__free_page(spd.pages[i]);

	res = error;
	goto shrink_ret;
}
EXPORT_SYMBOL(default_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int more;

	if (!likely(file->f_op && file->f_op->sendpage))
		return -EINVAL;

	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;

	if (sd->len < sd->total_len && pipe->nrbufs > 1)
		more |= MSG_SENDPAGE_NOTLAST;

	return file->f_op->sendpage(file, buf->page, buf->offset,
				    sd->len, &pos, more);
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		 struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	void *fsdata;
	int ret;

	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (unlikely(ret))
		goto out;

	if (buf->page != page) {
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst);
		buf->ops->unmap(pipe, buf, src);
	}
	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
				page, fsdata);
out:
	return ret;
}
EXPORT_SYMBOL(pipe_to_file);

static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}

/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination. It returns when there are no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied. It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
			  splice_actor *actor)
{
	int ret;

	while (pipe->nrbufs) {
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		const struct pipe_buf_operations *ops = buf->ops;

		sd->len = buf->len;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;

		ret = buf->ops->confirm(pipe, buf);
		if (unlikely(ret)) {
			if (ret == -ENODATA)
				ret = 0;
			return ret;
		}

		ret = actor(pipe, buf, sd);
		if (ret <= 0)
			return ret;

		buf->offset += ret;
		buf->len -= ret;

		sd->num_spliced += ret;
		sd->len -= ret;
		sd->pos += ret;
		sd->total_len -= ret;

		if (!buf->len) {
			buf->ops = NULL;
			ops->release(pipe, buf);
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
			if (pipe->files)
				sd->need_wakeup = true;
		}

		if (!sd->total_len)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_feed);

/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe: pipe to splice from
 * @sd: information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available. It will return zero
 *    or -errno if no more data needs to be spliced.
 */
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	while (!pipe->nrbufs) {
		if (!pipe->writers)
			return 0;

		if (!pipe->waiting_writers && sd->num_spliced)
			return 0;

		if (sd->flags & SPLICE_F_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		if (sd->need_wakeup) {
			wakeup_pipe_writers(pipe);
			sd->need_wakeup = false;
		}

		pipe_wait(pipe);
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_next);

/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd: information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
void splice_from_pipe_begin(struct splice_desc *sd)
{
	sd->num_spliced = 0;
	sd->need_wakeup = false;
}
EXPORT_SYMBOL(splice_from_pipe_begin);

/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe: pipe to splice from
 * @sd: information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary. It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	if (sd->need_wakeup)
		wakeup_pipe_writers(pipe);
}
EXPORT_SYMBOL(splice_from_pipe_end);

/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 *
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret;

	splice_from_pipe_begin(sd);
	do {
		ret = splice_from_pipe_next(pipe, sd);
		if (ret > 0)
			ret = splice_from_pipe_feed(pipe, sd, actor);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);

	return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);

/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe: pipe to splice from
 * @out: file to splice to
 * @ppos: position in @out
 * @len: how many bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the pipe inode,
 *    otherwise it's identical to __splice_from_pipe().
 *
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	pipe_lock(pipe);
	ret = __splice_from_pipe(pipe, &sd, actor);
	pipe_unlock(pipe);

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.flags = flags,
		.u.file = out,
	};
	ssize_t ret;

	ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
	if (ret)
		return ret;
	sd.total_len = len;
	sd.pos = *ppos;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited(mapping);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

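/*
 * Illustrative sketch (hypothetical filesystem, not compiled): a
 * filesystem that keeps its data in the page cache would typically wire
 * the two generic helpers above into its file_operations so that
 * splice() works on its files.
 */
#if 0
static const struct file_operations examplefs_file_operations = {
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};
#endif
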
static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			  struct splice_desc *sd)
{
	int ret;
	void *data;
	loff_t tmp = sd->pos;

	data = buf->ops->map(pipe, buf, 0);
	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
	buf->ops->unmap(pipe, buf, data);

	return ret;
}

static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
					 struct file *out, loff_t *ppos,
					 size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
	if (ret > 0)
		*ppos += ret;

	return ret;
}

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
				loff_t *, size_t, unsigned int);
	int ret;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	if (unlikely(out->f_flags & O_APPEND))
		return -EINVAL;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (out->f_op && out->f_op->splice_write)
		splice_write = out->f_op->splice_write;
	else
		splice_write = default_file_splice_write;

	file_start_write(out);
	ret = splice_write(pipe, out, ppos, len, flags);
	file_end_write(out);
	return ret;
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	ssize_t (*splice_read)(struct file *, loff_t *,
			       struct pipe_inode_info *, size_t, unsigned int);
	int ret;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (in->f_op && in->f_op->splice_read)
		splice_read = in->f_op->splice_read;
	else
		splice_read = default_file_splice_read;

	return splice_read(in, ppos, pipe, len, flags);
}

/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in: file to splice from
 * @sd: actor information on where to splice to
 * @actor: handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = file_inode(in)->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->nrbufs = pipe->curbuf = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}

	if (!bytes)
		bytes = ret;

	goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);

static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, sd->opos, sd->total_len,
			      sd->flags);
}

/**
 * do_splice_direct - splices data directly between two files
 * @in: file to splice from
 * @ppos: input file offset
 * @out: file to splice to
 * @opos: output file offset
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      loff_t *opos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
		.opos		= opos,
	};
	long ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}

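/*
 * Illustrative sketch (hypothetical helper, not compiled): the
 * sendfile()-style call pattern for do_splice_direct() described above;
 * fs/read_write.c's do_sendfile() does essentially this, with full
 * permission and range checks around it.
 */
#if 0
static long sendfile_sketch(struct file *in, struct file *out, size_t len)
{
	loff_t ipos = 0, opos = 0;

	return do_splice_direct(in, &ipos, out, &opos, len, 0);
}
#endif
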
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset;
	long ret;

	ipipe = get_pipe_info(in);
	opipe = get_pipe_info(out);

	if (ipipe && opipe) {
		if (off_in || off_out)
			return -ESPIPE;

		if (!(in->f_mode & FMODE_READ))
			return -EBADF;

		if (!(out->f_mode & FMODE_WRITE))
			return -EBADF;

		/* Splicing to self would be fun, but... */
		if (ipipe == opipe)
			return -EINVAL;

		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
	}

	if (ipipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (!(out->f_mode & FMODE_PWRITE))
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = out->f_pos;
		}

		ret = do_splice_from(ipipe, out, &offset, len, flags);

		if (!off_out)
			out->f_pos = offset;
		else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	if (opipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (!(in->f_mode & FMODE_PREAD))
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = in->f_pos;
		}

		ret = do_splice_to(in, &offset, opipe, len, flags);

		if (!off_in)
			in->f_pos = offset;
		else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, bool aligned,
				unsigned int pipe_buffers)
{
	int buffers = 0, error = 0;

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (!access_ok(VERIFY_READ, base, len))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > pipe_buffers - buffers)
			npages = pipe_buffers - buffers;

		error = get_user_pages_fast((unsigned long)base, npages,
					    0, &pages[buffers]);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == pipe_buffers)
			break;

		nr_vecs--;
		iov++;
	}

	if (buffers)
		return buffers;

	return error;
}

static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
					      sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;
	return ret;
}

/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	pipe_lock(pipe);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;

			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	pipe_unlock(pipe);

	if (!ret)
		ret = error;

	return ret;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
		.spd_release = spd_release_page,
	};
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
					    spd.partial, false,
					    spd.nr_pages_max);
	if (spd.nr_pages <= 0)
		ret = spd.nr_pages;
	else
		ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return ret;
}

/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user pages and fill them into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restriction limitations on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 *
 */
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
		unsigned long, nr_segs, unsigned int, flags)
{
	struct fd f;
	long error;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
		else if (f.file->f_mode & FMODE_READ)
			error = vmsplice_to_user(f.file, iov, nr_segs, flags);

		fdput(f);
	}

	return error;
}

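/*
 * Illustrative userspace sketch (not part of this file, not compiled):
 * gifting a user buffer into a pipe with vmsplice(). With SPLICE_F_GIFT
 * the pages may later be stolen (see user_page_pipe_buf_steal() above),
 * so the caller must not reuse the buffer afterwards.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>

static int gift_buffer(int pipe_wfd, void *buf, size_t len)
{
	struct iovec iov = {
		.iov_base = buf,
		.iov_len  = len,
	};

	return vmsplice(pipe_wfd, &iov, 1, SPLICE_F_GIFT) < 0 ? -1 : 0;
}
#endif
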
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
		    unsigned int, nr_segs, unsigned int, flags)
{
	unsigned i;
	struct iovec __user *iov;
	if (nr_segs > UIO_MAXIOV)
		return -EINVAL;
	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
	for (i = 0; i < nr_segs; i++) {
		struct compat_iovec v;
		if (get_user(v.iov_base, &iov32[i].iov_base) ||
		    get_user(v.iov_len, &iov32[i].iov_len) ||
		    put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
		    put_user(v.iov_len, &iov[i].iov_len))
			return -EFAULT;
	}
	return sys_vmsplice(fd, iov, nr_segs, flags);
}
#endif

SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	struct fd in, out;
	long error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fd_in);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			out = fdget(fd_out);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_splice(in.file, off_in,
							  out.file, off_out,
							  len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}
	return error;
}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < pipe->buffers)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (pipe->nrbufs >= pipe->buffers) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Splice contents of ipipe to opipe.
 */
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, nbuf;
	bool input_wakeup = false;


retry:
	ret = ipipe_prep(ipipe, flags);
	if (ret)
		return ret;

	ret = opipe_prep(opipe, flags);
	if (ret)
		return ret;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (!ipipe->nrbufs && !ipipe->writers)
			break;

		/*
		 * Cannot make any progress, because either the input
		 * pipe is empty or the output pipe is full.
		 */
		if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
			/* Already processed some buffers, break */
			if (ret)
				break;

			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * We raced with another reader/writer and haven't
			 * managed to process any buffers. A zero return
			 * value means EOF, so retry instead.
			 */
			pipe_unlock(ipipe);
			pipe_unlock(opipe);
			goto retry;
		}

		ibuf = ipipe->bufs + ipipe->curbuf;
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
		obuf = opipe->bufs + nbuf;

		if (len >= ibuf->len) {
			/*
			 * Simply move the whole buffer from ipipe to opipe
			 */
			*obuf = *ibuf;
			ibuf->ops = NULL;
			opipe->nrbufs++;
			ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
			ipipe->nrbufs--;
			input_wakeup = true;
		} else {
			/*
			 * Get a reference to this pipe buffer,
			 * so we can copy the contents over.
			 */
			ibuf->ops->get(ipipe, ibuf);
			*obuf = *ibuf;

			/*
			 * Don't inherit the gift flag, we need to
			 * prevent multiple steals of this page.
			 */
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

			obuf->len = len;
			opipe->nrbufs++;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		ret += obuf->len;
		len -= obuf->len;
	} while (len);

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	if (input_wakeup)
		wakeup_pipe_writers(ipipe);

	return ret;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or run out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	/*
	 * return EAGAIN if we have the potential of some data in the
	 * future, otherwise just return 0
	 */
	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
		ret = -EAGAIN;

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = get_pipe_info(in);
	struct pipe_inode_info *opipe = get_pipe_info(out);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = opipe_prep(opipe, flags);
			if (!ret)
				ret = link_pipe(ipipe, opipe, len, flags);
		}
	}

	return ret;
}

SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
	struct fd in;
	int error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fdin);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			struct fd out = fdget(fdout);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_tee(in.file, out.file,
						       len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}

	return error;
}
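
/*
 * Illustrative userspace sketch (not part of this file, not compiled):
 * tee(2) as used by a tee(1)-style tool. stdin and stdout must both be
 * pipes; tee() duplicates the buffers without consuming them, so the
 * input is drained separately (here to /dev/null).
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int devnull = open("/dev/null", O_WRONLY);
	ssize_t n;

	if (devnull < 0)
		return 1;
	while ((n = tee(STDIN_FILENO, STDOUT_FILENO, 65536, 0)) > 0)
		/* data is still queued on stdin's pipe; drain it */
		if (splice(STDIN_FILENO, NULL, devnull, NULL, n, 0) < 0)
			return 1;
	return n < 0;
}
#endif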