#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

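/*
 * The iterate_* macros below walk an iterator one segment at a time,
 * running STEP on each chunk.  For user-space iovecs, STEP evaluates to
 * the number of bytes it failed to process (user copies can fault
 * partway through), and the walk stops early on a short step.  kvec and
 * bvec segments are kernel memory and cannot fault, so their STEP
 * result is discarded and full progress is assumed.
 */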
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

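/*
 * iterate_all_kinds() walks the segments without changing the iterator;
 * iterate_and_advance() additionally consumes the processed bytes,
 * updating i->count, i->iov_offset and the segment pointer/count.
 */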
#define iterate_and_advance(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
		if (skip == kvec->iov_len) {		\
			kvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= kvec - i->kvec;		\
		i->kvec = kvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}

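/*
 * Fast path: probe the first user chunk with fault_in_pages_writeable()
 * and copy out of an atomic kmap.  If a copy faults partway through
 * anyway, drop the atomic mapping and finish the remainder under a
 * plain (sleepable) kmap() below.
 */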
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
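
/*
 * Illustrative use (sketch of the usual buffered-write sequence, not
 * code from this file): fault the user pages in before taking locks
 * that the fault handler might also need, then copy with pagefaults
 * disabled and retry on a short copy:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	...lock the page...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */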

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
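
/*
 * Illustrative setup (not from this file): a plain read(2)-style path
 * wraps the user buffer in a single iovec and sizes the iterator to the
 * request:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */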

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

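/*
 * In the copy routines below, expressions like
 * "(from += v.iov_len) - v.iov_len" advance the linear-buffer cursor by
 * the chunk size while still yielding the chunk's start address, so
 * each per-segment step stays a single expression as the iterate_*
 * macros require.
 */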
size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *from = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
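
/*
 * The atomic kmap above means the user copy runs with pagefaults
 * disabled, so iov_iter_copy_from_user_atomic() can copy less than
 * asked; it returns the number of bytes actually copied.  Callers
 * normally prime the buffer with iov_iter_fault_in_readable() first
 * and retry on a short copy.
 */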

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
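
/*
 * Since the result is the OR of every segment's base address and
 * length, a caller can check all segments against an alignment mask at
 * once; direct I/O paths typically do something like (illustrative):
 *
 *	if (iov_iter_alignment(iter) & (blocksize - 1))
 *		return -EINVAL;
 */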

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
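
/*
 * Note for callers of iov_iter_get_pages(): on success the return value
 * is the number of bytes covered, *start is the offset of the data
 * within the first page, and each returned page holds a reference that
 * must be dropped with put_page() when done.
 */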

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
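
/*
 * The page array handed back by iov_iter_get_pages_alloc() comes from
 * get_pages_array() above, i.e. from kmalloc() or vmalloc(), so callers
 * must release it with kvfree(), as the error path here already does.
 */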

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
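
/*
 * Typical call sequence (illustrative sketch, mirroring readv/writev
 * style syscalls; UIO_FASTIOV is the usual on-stack fast-path size):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...use &iter...
 *	kfree(iov);	(NULL, and harmless, if the stack array was used)
 */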

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}