#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		struct bio_vec v;				\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
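
/*
 * Usage sketch (illustrative only; the helper name is hypothetical): the
 * canonical caller pattern, modelled on generic_perform_write(), prefaults
 * the user pages before doing the copy under a page lock, so the atomic
 * copy path above is unlikely to fault.
 */
static int __maybe_unused example_prefault_then_copy(struct page *page,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	if (iov_iter_fault_in_readable(i, bytes))
		return -EFAULT;
	/* with the pages faulted in, a short copy is unexpected here */
	return copy_page_from_iter(page, offset, bytes, i) == bytes ?
		0 : -EFAULT;
}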

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
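
/*
 * Usage sketch (hypothetical names): wrap a single user-space buffer in an
 * iov_iter.  READ marks the iovec as the destination of the copy, so it
 * pairs with copy_to_iter(); note that under KERNEL_DS the initializer
 * above silently turns this into an ITER_KVEC iterator.
 */
static ssize_t __maybe_unused example_send_to_user(void __user *ubuf,
		size_t len, const void *kbuf)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_init(&iter, READ, &iov, 1, len);
	return copy_to_iter(kbuf, len, &iter);
}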

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
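
/*
 * Sketch of the return convention (hypothetical helper): copy_to_iter()
 * advances the iterator and returns the number of bytes it managed to
 * copy; a short return means a fault partway through, not an error code.
 */
static int __maybe_unused example_emit_record(struct iov_iter *to,
		const void *rec, size_t len)
{
	size_t copied = copy_to_iter(rec, len, to);

	return copied == len ? 0 : -EFAULT;	/* short copy => faulted */
}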

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
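
/*
 * Sketch (hypothetical helper): gather a fixed-size header from a possibly
 * scattered source iterator; copy_from_iter() crosses segment boundaries
 * transparently and leaves the iterator positioned just past the header.
 */
static int __maybe_unused example_pull_header(void *hdr, size_t hdrlen,
		struct iov_iter *from)
{
	if (copy_from_iter(hdr, hdrlen, from) != hdrlen)
		return -EFAULT;
	return 0;
}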

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
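
/*
 * Sketch (hypothetical helper): zero-fill whatever remains of a destination
 * iterator, e.g. when a read hits EOF before satisfying the whole request;
 * iov_iter_count() is the remaining byte count.
 */
static size_t __maybe_unused example_zero_tail(struct iov_iter *to)
{
	return iov_iter_zero(iov_iter_count(to), to);
}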

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
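
/*
 * Sketch (hypothetical helper): consume a header without copying it.
 * iov_iter_advance() moves iov_offset/nr_segs forward exactly as a copy of
 * hdrlen bytes would, so subsequent copies start after the header.
 */
static int __maybe_unused example_skip_header(struct iov_iter *from,
		size_t hdrlen)
{
	if (iov_iter_count(from) < hdrlen)
		return -EINVAL;
	iov_iter_advance(from, hdrlen);
	return 0;
}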

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
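
/*
 * Sketch (hypothetical helper): a kernel-memory iterator over a single
 * kvec.  Note the calling convention: the caller must or ITER_KVEC into
 * the direction, which the BUG_ON() above enforces.
 */
static size_t __maybe_unused example_kernel_copy(void *dst, void *src,
		size_t len)
{
	struct kvec kv = { .iov_base = src, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_KVEC | WRITE, &kv, 1, len);
	return copy_from_iter(dst, len, &iter);
}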

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
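
/*
 * Sketch (hypothetical helper): the typical direct-I/O gate.  Because the
 * result above or-s every segment base and length together, one masked
 * test rejects the iterator if any segment breaks the block alignment.
 */
static bool __maybe_unused example_dio_aligned(const struct iov_iter *i,
		unsigned long blocksize_mask)
{
	return (iov_iter_alignment(i) & blocksize_mask) == 0;
}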

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
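
/*
 * Sketch (hypothetical helper): pin at most one page worth of the iterator
 * for zero-copy I/O.  On success the return value is the number of usable
 * bytes starting at *start in pages[0]; the caller owns the page reference
 * and must put_page() it when done.
 */
static ssize_t __maybe_unused example_pin_first_page(struct iov_iter *i,
		struct page **pages, size_t *start)
{
	return iov_iter_get_pages(i, pages, PAGE_SIZE, 1, start);
}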

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return 0;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (err)
			return 0;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
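
/*
 * Sketch (hypothetical helper): size a page-pointer array for a pinned
 * copy of the iterator.  iov_iter_npages() already clamps its answer at
 * maxpages, so the caller can pass its own hard cap directly.
 */
static struct page ** __maybe_unused example_alloc_page_array(
		const struct iov_iter *i, int maxpages)
{
	int n = iov_iter_npages(i, maxpages);

	return kcalloc(n, sizeof(struct page *), GFP_KERNEL);
}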

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
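
/*
 * Sketch of the import_iovec() calling convention (hypothetical helper):
 * *iov starts out pointing at a stack array; on return it is either NULL
 * (fast path, nothing to free) or a heap copy, so an unconditional
 * kfree() afterwards is always safe.
 */
static int __maybe_unused example_import(const struct iovec __user *uvec,
		unsigned nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	int ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, iter);
	if (ret < 0)
		return ret;
	/* ... use iter here, while iov is still live ... */
	kfree(iov);
	return 0;
}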

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
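
/*
 * Sketch (hypothetical helper): the single-buffer path used by plain
 * read()/write() style entry points; iov and iter live in caller storage,
 * so unlike import_iovec() there is nothing to free afterwards.
 */
static ssize_t __maybe_unused example_single_write(void __user *ubuf,
		size_t len, void *dst)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
	if (ret)
		return ret;
	return copy_from_iter(dst, iov_iter_count(&iter), &iter);
}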