/* lib/iov_iter.c */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

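/*
 * iterate_iovec - walk the user-space iovec array behind an iterator,
 * handing each contiguous chunk to STEP as __v.  STEP evaluates to the
 * number of bytes it failed to process (e.g. the return value of a
 * __copy_*_user() call); a short step terminates the walk early.  On
 * exit n holds the number of bytes actually processed and skip the
 * offset into the last segment touched.
 */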
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        size_t left; \
        size_t wanted = n; \
        __p = i->iov; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } else { \
                left = 0; \
        } \
        while (unlikely(!left && n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted - n; \
}

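/*
 * iterate_kvec - same walk for kernel-space kvec segments.  Kernel
 * copies cannot fault, so the value of STEP is ignored and all n bytes
 * are always consumed.
 */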
#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->kvec; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                (void)(STEP); \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                (void)(STEP); \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted; \
}

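/*
 * iterate_bvec - walk the bio_vec array using the generic bvec
 * iterator, handing each page/offset/length chunk to STEP as __v.
 * __bi is left positioned just past the last byte processed, which
 * iterate_and_advance() below uses to update the iterator.
 */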
#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
        struct bvec_iter __start; \
        __start.bi_size = n; \
        __start.bi_bvec_done = skip; \
        __start.bi_idx = 0; \
        for_each_bvec(__v, i->bvec, __bi, __start) { \
                if (!__v.bv_len) \
                        continue; \
                (void)(STEP); \
        } \
}

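/*
 * iterate_all_kinds - dispatch on the flavour of the iterator and run
 * the matching walk: I for user-space iovecs, B for bio_vecs, K for
 * kernel kvecs.  The iterator itself is left untouched.
 */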
#define iterate_all_kinds(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                struct bio_vec v; \
                struct bvec_iter __bi; \
                iterate_bvec(i, n, v, __bi, skip, (B)) \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                struct kvec v; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
        } \
}

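/*
 * iterate_and_advance - like iterate_all_kinds(), but additionally
 * advances the iterator past the bytes that were processed: count,
 * iov_offset, nr_segs and the current segment pointer are all updated.
 */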
#define iterate_and_advance(i, n, v, I, B, K) { \
        if (unlikely(i->count < n)) \
                n = i->count; \
        if (i->count) { \
                size_t skip = i->iov_offset; \
                if (unlikely(i->type & ITER_BVEC)) { \
                        const struct bio_vec *bvec = i->bvec; \
                        struct bio_vec v; \
                        struct bvec_iter __bi; \
                        iterate_bvec(i, n, v, __bi, skip, (B)) \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
                        i->nr_segs -= i->bvec - bvec; \
                        skip = __bi.bi_bvec_done; \
                } else if (unlikely(i->type & ITER_KVEC)) { \
                        const struct kvec *kvec; \
                        struct kvec v; \
                        iterate_kvec(i, n, v, kvec, skip, (K)) \
                        if (skip == kvec->iov_len) { \
                                kvec++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= kvec - i->kvec; \
                        i->kvec = kvec; \
                } else { \
                        const struct iovec *iov; \
                        struct iovec v; \
                        iterate_iovec(i, n, v, iov, skip, (I)) \
                        if (skip == iov->iov_len) { \
                                iov++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= iov - i->iov; \
                        i->iov = iov; \
                } \
                i->count -= n; \
                i->iov_offset = skip; \
        } \
}

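/*
 * Copy data from a page into a user-backed (iovec) iterator.  The fast
 * path maps the page with kmap_atomic() and uses the non-faulting
 * __copy_to_user_inatomic(); if that comes up short we drop back to a
 * sleeping kmap() with plain __copy_to_user().  Returns the number of
 * bytes copied and advances the iterator by the same amount.
 */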
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

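/*
 * Mirror image of copy_page_to_iter_iovec(): copy data from a
 * user-backed (iovec) iterator into a page, with the same atomic-kmap
 * fast path and kmap() fallback.
 */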
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_multipages_readable(v.iov_base,
                                        v.iov_len);
                        if (unlikely(err))
                                return err;
                        0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

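/*
 * Initialize an iterator over an array of user-space iovecs.  If we are
 * running under set_fs(KERNEL_DS), the "user" pointers are really kernel
 * pointers, so the iterator is flagged ITER_KVEC instead.
 *
 * Typical call pattern (illustrative sketch only, not lifted from a
 * particular caller; ubuf/kernel_buf/copied are placeholder names):
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kernel_buf, len, &iter);
 */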
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

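/*
 * copy_to_iter() copies from a kernel buffer into the iterator and
 * advances it; copy_from_iter() below is the mirror image.  Both return
 * the number of bytes actually copied, which may be less than @bytes if
 * the iterator is exhausted or a user-space segment faults.
 */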
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

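/*
 * Page-based variants: for kernel-backed iterators (ITER_BVEC or
 * ITER_KVEC) the page is simply mapped with kmap_atomic() and handed to
 * the flat copy helpers above; user-backed iterators go through the
 * fault-tolerant slow paths at the top of this file.
 */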
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

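/*
 * Copy user data into a page while page faults are disabled (the page
 * is mapped with kmap_atomic()).  Unlike copy_from_iter() this does not
 * advance the iterator: the caller checks how much was copied and calls
 * iov_iter_advance() itself, which is the pattern the generic
 * buffered-write path relies on.
 */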
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

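/*
 * Return the bitwise OR of the base addresses and lengths of all
 * remaining segments.  Callers test this against an alignment mask to
 * decide whether the iterator is suitably aligned (e.g. for direct
 * I/O).
 */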
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

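/*
 * Pin the pages backing the first segment of the iterator, up to
 * maxsize bytes and maxpages pages.  Returns the number of bytes
 * covered by the pinned pages, with *start set to the offset into the
 * first page, or a negative errno.  Only the first segment is
 * examined; ITER_KVEC iterators are rejected with -EFAULT.
 */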
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

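/*
 * Checksumming copies for the networking code: csum_and_copy_from_iter()
 * and csum_and_copy_to_iter() copy the data and fold it into *csum as
 * they go, using csum_and_copy_*_user() for user segments and
 * csum_partial_copy_nocheck() for kernel ones.
 */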
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

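/*
 * Return the number of pages spanned by the data remaining in the
 * iterator, capped at maxpages.
 */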
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

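/*
 * Duplicate an iterator, giving the copy its own segment array (iovec
 * and kvec share a layout; bio_vec is handled separately).  Returns the
 * new array, or NULL if the allocation failed.
 */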
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

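/*
 * Validate a user-supplied iovec array and set up an iov_iter over it.
 * On success *iov points at the heap-allocated copy that the caller
 * must kfree(), or is set to NULL when the caller-provided fast_segs
 * array was large enough; the total byte count ends up in i->count
 * rather than in the return value.  On failure everything is freed and
 * a negative errno is returned.
 *
 * Typical call pattern (illustrative sketch only):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use the iterator ...
 *	kfree(iov);
 */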
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                         *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

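/*
 * Set up a single-segment iterator over one user buffer, clamping the
 * length to MAX_RW_COUNT and verifying the range with access_ok().
 * The caller supplies the storage for both the iovec and the iov_iter.
 */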
int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);