/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
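/*
 * Illustrative userspace sketch (an addition for clarity, not part of the
 * original source): this is the splice(2) usage the comment above describes.
 * It assumes two hypothetical descriptors, a regular file 'in_fd' and a
 * socket 'sock_fd', and moves data between them through a pipe so it never
 * has to pass through a userspace buffer:
 *
 *	int pfd[2];
 *
 *	if (pipe(pfd) == 0) {
 *		ssize_t n;
 *
 *		n = splice(in_fd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE);
 *		if (n > 0)
 *			splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MOVE);
 *	}
 */
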
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include "internal.h"

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return 1;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Check whether the contents of buf are OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}

/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe:	pipe to fill
 * @spd:	data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	pipe_lock(pipe);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->files)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < pipe->buffers)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);

	if (do_wakeup)
		wakeup_pipe_readers(pipe);

	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}

void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
	page_cache_release(spd->pages[i]);
}

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
 */
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
	unsigned int buffers = ACCESS_ONCE(pipe->buffers);

	spd->nr_pages_max = buffers;
	if (buffers <= PIPE_DEF_BUFFERS)
		return 0;

	spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
	spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);

	if (spd->pages && spd->partial)
		return 0;

	kfree(spd->pages);
	kfree(spd->partial);
	return -ENOMEM;
}

void splice_shrink_spd(struct splice_pipe_desc *spd)
{
	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
		return;

	kfree(spd->pages);
	kfree(spd->partial);
}

static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
	index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
	if (spd.nr_pages < nr_pages)
		page_cache_sync_readahead(mapping, &in->f_ra, in,
			index, req_pages - spd.nr_pages);

	error = 0;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (PageReadahead(page))
			page_cache_async_readahead(mapping, &in->f_ra, in,
					page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * Page was truncated, or invalidated by the
			 * filesystem.  Redo the find/create, but this time the
			 * page is kept locked, so there's no chance of another
			 * race with truncate/invalidate.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

				if (!page) {
					error = -ENOMEM;
					break;
				}
				page_cache_release(spd.pages[page_nr]);
				spd.pages[page_nr] = page;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);
	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implement
 *    a readpage() hook.
 *
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0) {
		*ppos += ret;
		file_accessed(in);
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);

static const struct pipe_buf_operations default_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_nosteal,
	.get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);

static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
			    unsigned long vlen, loff_t offset)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
	set_fs(old_fs);

	return res;
}

ssize_t kernel_write(struct file *file, const char *buf, size_t count,
		     loff_t pos)
{
	mm_segment_t old_fs;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_write(file, (__force const char __user *)buf, count, &pos);
	set_fs(old_fs);

	return res;
}
EXPORT_SYMBOL(kernel_write);
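
/*
 * Hedged usage sketch (an addition for clarity, not part of the original
 * source): kernel_write() above lets in-kernel callers push a kernel buffer
 * through the VFS. A hypothetical caller that has opened a file with
 * filp_open() might do:
 *
 *	struct file *filp = filp_open("/tmp/example", O_WRONLY | O_CREAT, 0644);
 *
 *	if (!IS_ERR(filp)) {
 *		static const char msg[] = "hello\n";
 *		ssize_t n = kernel_write(filp, msg, sizeof(msg) - 1, 0);
 *
 *		if (n < 0)
 *			pr_warn("example write failed: %zd\n", n);
 *		filp_close(filp, NULL);
 *	}
 *
 * The "/tmp/example" path and the surrounding context are illustrative only.
 */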

ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	unsigned int nr_pages;
	unsigned int nr_freed;
	size_t offset;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
	ssize_t res;
	size_t this_len;
	int error;
	int i;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &default_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	res = -ENOMEM;
	vec = __vec;
	if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
		vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
		if (!vec)
			goto shrink_ret;
	}

	offset = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
		struct page *page;

		page = alloc_page(GFP_USER);
		error = -ENOMEM;
		if (!page)
			goto err;

		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
		vec[i].iov_base = (void __user *) page_address(page);
		vec[i].iov_len = this_len;
		spd.pages[i] = page;
		spd.nr_pages++;
		len -= this_len;
		offset = 0;
	}

	res = kernel_readv(in, vec, spd.nr_pages, *ppos);
	if (res < 0) {
		error = res;
		goto err;
	}

	error = 0;
	if (!res)
		goto err;

	nr_freed = 0;
	for (i = 0; i < spd.nr_pages; i++) {
		this_len = min_t(size_t, vec[i].iov_len, res);
		spd.partial[i].offset = 0;
		spd.partial[i].len = this_len;
		if (!this_len) {
			__free_page(spd.pages[i]);
			spd.pages[i] = NULL;
			nr_freed++;
		}
		res -= this_len;
	}
	spd.nr_pages -= nr_freed;

	res = splice_to_pipe(pipe, &spd);
	if (res > 0)
		*ppos += res;

shrink_ret:
	if (vec != __vec)
		kfree(vec);
	splice_shrink_spd(&spd);
	return res;

err:
	for (i = 0; i < spd.nr_pages; i++)
		__free_page(spd.pages[i]);

	res = error;
	goto shrink_ret;
}
EXPORT_SYMBOL(default_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int more;

	if (!likely(file->f_op && file->f_op->sendpage))
		return -EINVAL;

	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;

	if (sd->len < sd->total_len && pipe->nrbufs > 1)
		more |= MSG_SENDPAGE_NOTLAST;

	return file->f_op->sendpage(file, buf->page, buf->offset,
				    sd->len, &pos, more);
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		 struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	void *fsdata;
	int ret;

	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (unlikely(ret))
		goto out;

	if (buf->page != page) {
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst);
		buf->ops->unmap(pipe, buf, src);
	}
	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
				page, fsdata);
out:
	return ret;
}
EXPORT_SYMBOL(pipe_to_file);

static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}

/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination. It returns when there are no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied. It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
			  splice_actor *actor)
{
	int ret;

	while (pipe->nrbufs) {
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		const struct pipe_buf_operations *ops = buf->ops;

		sd->len = buf->len;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;

		ret = buf->ops->confirm(pipe, buf);
		if (unlikely(ret)) {
			if (ret == -ENODATA)
				ret = 0;
			return ret;
		}

		ret = actor(pipe, buf, sd);
		if (ret <= 0)
			return ret;

		buf->offset += ret;
		buf->len -= ret;

		sd->num_spliced += ret;
		sd->len -= ret;
		sd->pos += ret;
		sd->total_len -= ret;

		if (!buf->len) {
			buf->ops = NULL;
			ops->release(pipe, buf);
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
			if (pipe->files)
				sd->need_wakeup = true;
		}

		if (!sd->total_len)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_feed);

/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available. It will return zero
 *    or -errno if no more data needs to be spliced.
 */
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	while (!pipe->nrbufs) {
		if (!pipe->writers)
			return 0;

		if (!pipe->waiting_writers && sd->num_spliced)
			return 0;

		if (sd->flags & SPLICE_F_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		if (sd->need_wakeup) {
			wakeup_pipe_writers(pipe);
			sd->need_wakeup = false;
		}

		pipe_wait(pipe);
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_next);

/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
void splice_from_pipe_begin(struct splice_desc *sd)
{
	sd->num_spliced = 0;
	sd->need_wakeup = false;
}
EXPORT_SYMBOL(splice_from_pipe_begin);

/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary. It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	if (sd->need_wakeup)
		wakeup_pipe_writers(pipe);
}
EXPORT_SYMBOL(splice_from_pipe_end);

/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 *
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret;

	splice_from_pipe_begin(sd);
	do {
		cond_resched();
		ret = splice_from_pipe_next(pipe, sd);
		if (ret > 0)
			ret = splice_from_pipe_feed(pipe, sd, actor);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);

	return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);

/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe:	pipe to splice from
 * @out:	file to splice to
 * @ppos:	position in @out
 * @len:	how many bytes to splice
 * @flags:	splice modifier flags
 * @actor:	handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the pipe inode,
 *    otherwise it's identical to __splice_from_pipe().
 *
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	pipe_lock(pipe);
	ret = __splice_from_pipe(pipe, &sd, actor);
	pipe_unlock(pipe);

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.flags = flags,
		.u.file = out,
	};
	ssize_t ret;

	ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
	if (ret)
		return ret;
	sd.total_len = len;
	sd.pos = *ppos;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited(mapping);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			  struct splice_desc *sd)
{
	int ret;
	void *data;
	loff_t tmp = sd->pos;

	data = buf->ops->map(pipe, buf, 0);
	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
	buf->ops->unmap(pipe, buf, data);

	return ret;
}

static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
					 struct file *out, loff_t *ppos,
					 size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
	if (ret > 0)
		*ppos += ret;

	return ret;
}

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
				loff_t *, size_t, unsigned int);
	int ret;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	if (unlikely(out->f_flags & O_APPEND))
		return -EINVAL;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (out->f_op && out->f_op->splice_write)
		splice_write = out->f_op->splice_write;
	else
		splice_write = default_file_splice_write;

	file_start_write(out);
	ret = splice_write(pipe, out, ppos, len, flags);
	file_end_write(out);
	return ret;
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	ssize_t (*splice_read)(struct file *, loff_t *,
			       struct pipe_inode_info *, size_t, unsigned int);
	int ret;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (in->f_op && in->f_op->splice_read)
		splice_read = in->f_op->splice_read;
	else
		splice_read = default_file_splice_read;

	return splice_read(in, ppos, pipe, len, flags);
}

/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in:		file to splice from
 * @sd:		actor information on where to splice to
 * @actor:	handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags, more;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = file_inode(in)->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;
	more = sd->flags & SPLICE_F_MORE;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * If more data is pending, set SPLICE_F_MORE.
		 * If this is the last data and SPLICE_F_MORE was not set
		 * initially, clear it.
		 */
		if (read_len < len)
			sd->flags |= SPLICE_F_MORE;
		else if (!more)
			sd->flags &= ~SPLICE_F_MORE;
		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->nrbufs = pipe->curbuf = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}

	if (!bytes)
		bytes = ret;

	goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);

static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, sd->opos, sd->total_len,
			      sd->flags);
}

/**
 * do_splice_direct - splices data directly between two files
 * @in:		file to splice from
 * @ppos:	input file offset
 * @out:	file to splice to
 * @opos:	output file offset
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      loff_t *opos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
		.opos		= opos,
	};
	long ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}
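
/*
 * Aside (added for illustration, not in the original source): the
 * do_splice_direct() helper above does in one pass what a userspace program
 * without sendfile() would have to approximate with two splice() calls
 * through its own pipe per chunk, e.g.:
 *
 *	splice(in_fd, NULL, pfd[1], NULL, chunk, 0);
 *	splice(pfd[0], NULL, out_fd, NULL, chunk, 0);
 *
 * Reusing the cached current->splice_pipe avoids both the extra system call
 * and repeated pipe setup.
 */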

static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset;
	long ret;

	ipipe = get_pipe_info(in);
	opipe = get_pipe_info(out);

	if (ipipe && opipe) {
		if (off_in || off_out)
			return -ESPIPE;

		if (!(in->f_mode & FMODE_READ))
			return -EBADF;

		if (!(out->f_mode & FMODE_WRITE))
			return -EBADF;

		/* Splicing to self would be fun, but... */
		if (ipipe == opipe)
			return -EINVAL;

		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
	}

	if (ipipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (!(out->f_mode & FMODE_PWRITE))
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = out->f_pos;
		}

		ret = do_splice_from(ipipe, out, &offset, len, flags);

		if (!off_out)
			out->f_pos = offset;
		else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	if (opipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (!(in->f_mode & FMODE_PREAD))
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = in->f_pos;
		}

		ret = do_splice_to(in, &offset, opipe, len, flags);

		if (!off_in)
			in->f_pos = offset;
		else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our single pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, bool aligned,
				unsigned int pipe_buffers)
{
	int buffers = 0, error = 0;

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (!access_ok(VERIFY_READ, base, len))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > pipe_buffers - buffers)
			npages = pipe_buffers - buffers;

		error = get_user_pages_fast((unsigned long)base, npages,
					0, &pages[buffers]);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == pipe_buffers)
			break;

		nr_vecs--;
		iov++;
	}

	if (buffers)
		return buffers;

	return error;
}

static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
							sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;
	return ret;
}

/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	pipe_lock(pipe);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;

			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	pipe_unlock(pipe);

	if (!ret)
		ret = error;

	return ret;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
		.spd_release = spd_release_page,
	};
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
					    spd.partial, false,
					    spd.nr_pages_max);
	if (spd.nr_pages <= 0)
		ret = spd.nr_pages;
	else
		ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return ret;
}

/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restriction limitations on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 *
 */
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
		unsigned long, nr_segs, unsigned int, flags)
{
	struct fd f;
	long error;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
		else if (f.file->f_mode & FMODE_READ)
			error = vmsplice_to_user(f.file, iov, nr_segs, flags);

		fdput(f);
	}

	return error;
}
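
/*
 * Illustrative userspace sketch (an addition for clarity, not part of the
 * original source): gifting user pages into a pipe with vmsplice(2),
 * assuming a hypothetical page-aligned buffer 'buf' of 'len' bytes and a
 * pipe write end 'pfd[1]':
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len  = len,
 *	};
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 *
 * With SPLICE_F_GIFT the pages may later be stolen by a splice(2) with
 * SPLICE_F_MOVE instead of being copied (see user_page_pipe_buf_steal()).
 */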

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
		    unsigned int, nr_segs, unsigned int, flags)
{
	unsigned i;
	struct iovec __user *iov;
	if (nr_segs > UIO_MAXIOV)
		return -EINVAL;
	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
	for (i = 0; i < nr_segs; i++) {
		struct compat_iovec v;
		if (get_user(v.iov_base, &iov32[i].iov_base) ||
		    get_user(v.iov_len, &iov32[i].iov_len) ||
		    put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
		    put_user(v.iov_len, &iov[i].iov_len))
			return -EFAULT;
	}
	return sys_vmsplice(fd, iov, nr_segs, flags);
}
#endif

SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	struct fd in, out;
	long error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fd_in);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			out = fdget(fd_out);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_splice(in.file, off_in,
							  out.file, off_out,
							  len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}
	return error;
}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs < pipe->buffers)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (pipe->nrbufs >= pipe->buffers) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Splice contents of ipipe to opipe.
 */
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, nbuf;
	bool input_wakeup = false;


retry:
	ret = ipipe_prep(ipipe, flags);
	if (ret)
		return ret;

	ret = opipe_prep(opipe, flags);
	if (ret)
		return ret;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (!ipipe->nrbufs && !ipipe->writers)
			break;

		/*
		 * Cannot make any progress, because either the input
		 * pipe is empty or the output pipe is full.
		 */
		if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
			/* Already processed some buffers, break */
			if (ret)
				break;

			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * We raced with another reader/writer and haven't
			 * managed to process any buffers. A zero return
			 * value means EOF, so retry instead.
			 */
			pipe_unlock(ipipe);
			pipe_unlock(opipe);
			goto retry;
		}

		ibuf = ipipe->bufs + ipipe->curbuf;
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
		obuf = opipe->bufs + nbuf;

		if (len >= ibuf->len) {
			/*
			 * Simply move the whole buffer from ipipe to opipe
			 */
			*obuf = *ibuf;
			ibuf->ops = NULL;
			opipe->nrbufs++;
			ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
			ipipe->nrbufs--;
			input_wakeup = true;
		} else {
			/*
			 * Get a reference to this pipe buffer,
			 * so we can copy the contents over.
			 */
			ibuf->ops->get(ipipe, ibuf);
			*obuf = *ibuf;

			/*
			 * Don't inherit the gift flag, we need to
			 * prevent multiple steals of this page.
			 */
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

			obuf->len = len;
			opipe->nrbufs++;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		ret += obuf->len;
		len -= obuf->len;
	} while (len);

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	if (input_wakeup)
		wakeup_pipe_writers(ipipe);

	return ret;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	/*
	 * return EAGAIN if we have the potential of some data in the
	 * future, otherwise just return 0
	 */
	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
		ret = -EAGAIN;

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = get_pipe_info(in);
	struct pipe_inode_info *opipe = get_pipe_info(out);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = opipe_prep(opipe, flags);
			if (!ret)
				ret = link_pipe(ipipe, opipe, len, flags);
		}
	}

	return ret;
}

SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
	struct fd in;
	int error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fdin);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			struct fd out = fdget(fdout);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_tee(in.file, out.file,
						       len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}

	return error;
}
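
/*
 * Illustrative userspace sketch (an addition for clarity, not part of the
 * original source): tee(2) duplicates pipe contents without consuming them.
 * Assuming stdin and stdout are both pipes and 'file_fd' is a hypothetical
 * output file, a program in the middle of a shell pipeline could do,
 * roughly following the classic tee(2) example:
 *
 *	ssize_t n = tee(STDIN_FILENO, STDOUT_FILENO, 65536, SPLICE_F_NONBLOCK);
 *
 *	if (n > 0)
 *		splice(STDIN_FILENO, NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 *
 * The tee() call only references the pages on the output pipe; the data is
 * actually drained from stdin by the splice() call.
 */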