1 /*
2 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 */
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/mempool.h>
27 #include <linux/workqueue.h>
28 #include <scsi/sg.h> /* for struct sg_iovec */
29
30 #define BIO_POOL_SIZE 256
31
32 static kmem_cache_t *bio_slab;
33
34 #define BIOVEC_NR_POOLS 6
35
36 /*
37 * a small number of entries is fine, not going to be performance critical.
38 * basically we just need to survive
39 */
40 #define BIO_SPLIT_ENTRIES 8
41 mempool_t *bio_split_pool;
42
43 struct biovec_slab {
44 int nr_vecs;
45 char *name;
46 kmem_cache_t *slab;
47 };
48
49 /*
50 * if you change this list, also change bvec_alloc or things will
51 * break badly! cannot be bigger than what you can fit into an
52 * unsigned short
53 */
54
55 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
56 static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
57 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
58 };
59 #undef BV
60
61 /*
62 * bio_set is used to allow other portions of the IO system to
63 * allocate their own private memory pools for bio and iovec structures.
64 * These memory pools in turn all allocate from the bio_slab
65 * and the bvec_slabs[].
66 */
67 struct bio_set {
68 mempool_t *bio_pool;
69 mempool_t *bvec_pools[BIOVEC_NR_POOLS];
70 };
71
72 /*
73 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
74 * IO code that does not need private memory pools.
75 */
76 static struct bio_set *fs_bio_set;
77
78 static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
79 {
80 struct bio_vec *bvl;
81 struct biovec_slab *bp;
82
83 /*
84 * see comment near the bvec_slabs[] definition above!
85 */
86 switch (nr) {
87 case 1 : *idx = 0; break;
88 case 2 ... 4: *idx = 1; break;
89 case 5 ... 16: *idx = 2; break;
90 case 17 ... 64: *idx = 3; break;
91 case 65 ... 128: *idx = 4; break;
92 case 129 ... BIO_MAX_PAGES: *idx = 5; break;
93 default:
94 return NULL;
95 }
96 /*
97 * idx now points to the pool we want to allocate from
98 */
99
100 bp = bvec_slabs + *idx;
101 bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
102 if (bvl)
103 memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
104
105 return bvl;
106 }
107
108 /*
109 * default destructor for a bio allocated with bio_alloc_bioset()
110 */
111 static void bio_destructor(struct bio *bio)
112 {
113 const int pool_idx = BIO_POOL_IDX(bio);
114 struct bio_set *bs = bio->bi_set;
115
116 BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
117
118 mempool_free(bio->bi_io_vec, bs->bvec_pools[pool_idx]);
119 mempool_free(bio, bs->bio_pool);
120 }
121
122 inline void bio_init(struct bio *bio)
123 {
124 bio->bi_next = NULL;
125 bio->bi_flags = 1 << BIO_UPTODATE;
126 bio->bi_rw = 0;
127 bio->bi_vcnt = 0;
128 bio->bi_idx = 0;
129 bio->bi_phys_segments = 0;
130 bio->bi_hw_segments = 0;
131 bio->bi_hw_front_size = 0;
132 bio->bi_hw_back_size = 0;
133 bio->bi_size = 0;
134 bio->bi_max_vecs = 0;
135 bio->bi_end_io = NULL;
136 atomic_set(&bio->bi_cnt, 1);
137 bio->bi_private = NULL;
138 }
139
140 /**
141 * bio_alloc_bioset - allocate a bio for I/O
142 * @gfp_mask: the GFP_ mask given to the slab allocator
143 * @nr_iovecs: number of iovecs to pre-allocate
144 * @bs: the bio_set to allocate from
145 *
146 * Description:
147 * bio_alloc_bioset will first try its own mempool to satisfy the allocation.
148 * If %__GFP_WAIT is set then we will block on the internal pool waiting
149 * for a &struct bio to become free.
150 *
151 * allocate bio and iovecs from the memory pools specified by the
152 * bio_set structure.
153 **/
154 struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
155 {
156 struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
157
158 if (likely(bio)) {
159 struct bio_vec *bvl = NULL;
160
161 bio_init(bio);
162 if (likely(nr_iovecs)) {
163 unsigned long idx;
164
165 bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
166 if (unlikely(!bvl)) {
167 mempool_free(bio, bs->bio_pool);
168 bio = NULL;
169 goto out;
170 }
171 bio->bi_flags |= idx << BIO_POOL_OFFSET;
172 bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
173 }
174 bio->bi_io_vec = bvl;
175 bio->bi_destructor = bio_destructor;
176 bio->bi_set = bs;
177 }
178 out:
179 return bio;
180 }
181
182 struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
183 {
184 return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
185 }
186
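/*
 * Example (illustrative sketch): a typical caller allocates a bio from the
 * shared fs_bio_set via bio_alloc() and drops it with bio_put().  With a
 * __GFP_WAIT mask (GFP_KERNEL, GFP_NOIO) the mempool backs the allocation,
 * so it may block but will not fail; the NULL check only matters for
 * non-waiting masks such as GFP_ATOMIC.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	...
 *	bio_put(bio);
 */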
187 void zero_fill_bio(struct bio *bio)
188 {
189 unsigned long flags;
190 struct bio_vec *bv;
191 int i;
192
193 bio_for_each_segment(bv, bio, i) {
194 char *data = bvec_kmap_irq(bv, &flags);
195 memset(data, 0, bv->bv_len);
196 flush_dcache_page(bv->bv_page);
197 bvec_kunmap_irq(data, &flags);
198 }
199 }
200 EXPORT_SYMBOL(zero_fill_bio);
201
202 /**
203 * bio_put - release a reference to a bio
204 * @bio: bio to release reference to
205 *
206 * Description:
207 * Put a reference to a &struct bio, either one you have gotten with
208 * bio_alloc or bio_get. The last put of a bio will free it.
209 **/
210 void bio_put(struct bio *bio)
211 {
212 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
213
214 /*
215 * last put frees it
216 */
217 if (atomic_dec_and_test(&bio->bi_cnt)) {
218 bio->bi_next = NULL;
219 bio->bi_destructor(bio);
220 }
221 }
222
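/*
 * Example (illustrative sketch): a submitter that wants to inspect the bio
 * after I/O completion takes an extra reference with bio_get() so the
 * completion path's put cannot free it underneath us.  The completion
 * variable and its use in the bi_end_io handler are assumptions.
 *
 *	bio_get(bio);
 *	submit_bio(READ, bio);
 *	wait_for_completion(&done);
 *	if (!bio_flagged(bio, BIO_UPTODATE))
 *		err = -EIO;
 *	bio_put(bio);
 */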
223 inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
224 {
225 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
226 blk_recount_segments(q, bio);
227
228 return bio->bi_phys_segments;
229 }
230
231 inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
232 {
233 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
234 blk_recount_segments(q, bio);
235
236 return bio->bi_hw_segments;
237 }
238
239 /**
240 * __bio_clone - clone a bio
241 * @bio: destination bio
242 * @bio_src: bio to clone
243 *
244 * Clone a &bio. The caller will own the new bio, but not
245 * the actual data it points to. The reference count of the new
246 * bio will be one.
247 */
248 inline void __bio_clone(struct bio *bio, struct bio *bio_src)
249 {
250 request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
251
252 memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
253 bio_src->bi_max_vecs * sizeof(struct bio_vec));
254
255 bio->bi_sector = bio_src->bi_sector;
256 bio->bi_bdev = bio_src->bi_bdev;
257 bio->bi_flags |= 1 << BIO_CLONED;
258 bio->bi_rw = bio_src->bi_rw;
259 bio->bi_vcnt = bio_src->bi_vcnt;
260 bio->bi_size = bio_src->bi_size;
261 bio->bi_idx = bio_src->bi_idx;
262 bio_phys_segments(q, bio);
263 bio_hw_segments(q, bio);
264 }
265
266 /**
267 * bio_clone - clone a bio
268 * @bio: bio to clone
269 * @gfp_mask: allocation priority
270 *
271 * Like __bio_clone, only also allocates the returned bio
272 */
273 struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
274 {
275 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
276
277 if (b)
278 __bio_clone(b, bio);
279
280 return b;
281 }
282
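/*
 * Example (illustrative sketch): how a stacking driver might remap a bio
 * with bio_clone().  The target device, sector and end_io handler below are
 * assumptions for illustration.
 *
 *	struct bio *clone = bio_clone(bio, GFP_NOIO);
 *
 *	clone->bi_bdev = target_bdev;
 *	clone->bi_sector = target_sector;
 *	clone->bi_end_io = my_clone_end_io;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 *
 * The clone shares page data with the original bio, so the original must
 * not be completed until the clone has finished.
 */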
283 /**
284 * bio_get_nr_vecs - return approx number of vecs
285 * @bdev: I/O target
286 *
287 * Return the approximate number of pages we can send to this target.
288 * There's no guarantee that you will be able to fit this number of pages
289 * into a bio; it does not account for dynamic restrictions that vary
290 * with the offset.
291 */
292 int bio_get_nr_vecs(struct block_device *bdev)
293 {
294 request_queue_t *q = bdev_get_queue(bdev);
295 int nr_pages;
296
297 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
298 if (nr_pages > q->max_phys_segments)
299 nr_pages = q->max_phys_segments;
300 if (nr_pages > q->max_hw_segments)
301 nr_pages = q->max_hw_segments;
302
303 return nr_pages;
304 }
305
306 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
307 *page, unsigned int len, unsigned int offset)
308 {
309 int retried_segments = 0;
310 struct bio_vec *bvec;
311
312 /*
313 * cloned bio must not modify vec list
314 */
315 if (unlikely(bio_flagged(bio, BIO_CLONED)))
316 return 0;
317
318 if (bio->bi_vcnt >= bio->bi_max_vecs)
319 return 0;
320
321 if (((bio->bi_size + len) >> 9) > q->max_sectors)
322 return 0;
323
324 /*
325 * we might lose a segment or two here, but rather that than
326 * make this too complex.
327 */
328
329 while (bio->bi_phys_segments >= q->max_phys_segments
330 || bio->bi_hw_segments >= q->max_hw_segments
331 || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
332
333 if (retried_segments)
334 return 0;
335
336 retried_segments = 1;
337 blk_recount_segments(q, bio);
338 }
339
340 /*
341 * setup the new entry, we might clear it again later if we
342 * cannot add the page
343 */
344 bvec = &bio->bi_io_vec[bio->bi_vcnt];
345 bvec->bv_page = page;
346 bvec->bv_len = len;
347 bvec->bv_offset = offset;
348
349 /*
350 * if queue has other restrictions (eg varying max sector size
351 * depending on offset), it can specify a merge_bvec_fn in the
352 * queue to get further control
353 */
354 if (q->merge_bvec_fn) {
355 /*
356 * merge_bvec_fn() returns number of bytes it can accept
357 * at this offset
358 */
359 if (q->merge_bvec_fn(q, bio, bvec) < len) {
360 bvec->bv_page = NULL;
361 bvec->bv_len = 0;
362 bvec->bv_offset = 0;
363 return 0;
364 }
365 }
366
367 /* If we may be able to merge these biovecs, force a recount */
368 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
369 BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
370 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
371
372 bio->bi_vcnt++;
373 bio->bi_phys_segments++;
374 bio->bi_hw_segments++;
375 bio->bi_size += len;
376 return len;
377 }
378
379 /**
380 * bio_add_page - attempt to add page to bio
381 * @bio: destination bio
382 * @page: page to add
383 * @len: vec entry length
384 * @offset: vec entry offset
385 *
386 * Attempt to add a page to the bio_vec maplist. This can fail for a
387 * number of reasons, such as the bio being full or target block
388 * device limitations. The target block device must allow bios
389 * smaller than PAGE_SIZE, so it is always possible to add a single
390 * page to an empty bio.
391 */
392 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
393 unsigned int offset)
394 {
395 return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
396 len, offset);
397 }
398
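/*
 * Example (illustrative sketch): building and submitting a bio page by
 * page.  The caller sizes the bio with bio_get_nr_vecs() and stops adding
 * pages once bio_add_page() refuses one (return value smaller than the
 * requested length), submitting what it has collected so far.  All names
 * apart from the bio helpers are assumptions.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, bio_get_nr_vecs(bdev));
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = first_sector;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_context;
 *
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
 *			break;
 *
 *	submit_bio(WRITE, bio);
 */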
399 struct bio_map_data {
400 struct bio_vec *iovecs;
401 void __user *userptr;
402 };
403
404 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
405 {
406 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
407 bio->bi_private = bmd;
408 }
409
410 static void bio_free_map_data(struct bio_map_data *bmd)
411 {
412 kfree(bmd->iovecs);
413 kfree(bmd);
414 }
415
416 static struct bio_map_data *bio_alloc_map_data(int nr_segs)
417 {
418 struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
419
420 if (!bmd)
421 return NULL;
422
423 bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
424 if (bmd->iovecs)
425 return bmd;
426
427 kfree(bmd);
428 return NULL;
429 }
430
431 /**
432 * bio_uncopy_user - finish previously mapped bio
433 * @bio: bio being terminated
434 *
435 * Free pages allocated from bio_copy_user() and write back data
436 * to user space in case of a read.
437 */
438 int bio_uncopy_user(struct bio *bio)
439 {
440 struct bio_map_data *bmd = bio->bi_private;
441 const int read = bio_data_dir(bio) == READ;
442 struct bio_vec *bvec;
443 int i, ret = 0;
444
445 __bio_for_each_segment(bvec, bio, i, 0) {
446 char *addr = page_address(bvec->bv_page);
447 unsigned int len = bmd->iovecs[i].bv_len;
448
449 if (read && !ret && copy_to_user(bmd->userptr, addr, len))
450 ret = -EFAULT;
451
452 __free_page(bvec->bv_page);
453 bmd->userptr += len;
454 }
455 bio_free_map_data(bmd);
456 bio_put(bio);
457 return ret;
458 }
459
460 /**
461 * bio_copy_user - copy user data to bio
462 * @q: destination block queue
463 * @uaddr: start of user address
464 * @len: length in bytes
465 * @write_to_vm: bool indicating writing to pages or not
466 *
467 * Prepares and returns a bio for indirect user io, bouncing data
468 * to/from kernel pages as necessary. Must be paired with a call to
469 * bio_uncopy_user() on io completion.
470 */
471 struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
472 unsigned int len, int write_to_vm)
473 {
474 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
475 unsigned long start = uaddr >> PAGE_SHIFT;
476 struct bio_map_data *bmd;
477 struct bio_vec *bvec;
478 struct page *page;
479 struct bio *bio;
480 int i, ret;
481
482 bmd = bio_alloc_map_data(end - start);
483 if (!bmd)
484 return ERR_PTR(-ENOMEM);
485
486 bmd->userptr = (void __user *) uaddr;
487
488 ret = -ENOMEM;
489 bio = bio_alloc(GFP_KERNEL, end - start);
490 if (!bio)
491 goto out_bmd;
492
493 bio->bi_rw |= (!write_to_vm << BIO_RW);
494
495 ret = 0;
496 while (len) {
497 unsigned int bytes = PAGE_SIZE;
498
499 if (bytes > len)
500 bytes = len;
501
502 page = alloc_page(q->bounce_gfp | GFP_KERNEL);
503 if (!page) {
504 ret = -ENOMEM;
505 break;
506 }
507
508 if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
509 ret = -EINVAL;
510 break;
511 }
512
513 len -= bytes;
514 }
515
516 if (ret)
517 goto cleanup;
518
519 /*
520 * success
521 */
522 if (!write_to_vm) {
523 char __user *p = (char __user *) uaddr;
524
525 /*
526 * for a write, copy in data to kernel pages
527 */
528 ret = -EFAULT;
529 bio_for_each_segment(bvec, bio, i) {
530 char *addr = page_address(bvec->bv_page);
531
532 if (copy_from_user(addr, p, bvec->bv_len))
533 goto cleanup;
534 p += bvec->bv_len;
535 }
536 }
537
538 bio_set_map_data(bmd, bio);
539 return bio;
540 cleanup:
541 bio_for_each_segment(bvec, bio, i)
542 __free_page(bvec->bv_page);
543
544 bio_put(bio);
545 out_bmd:
546 bio_free_map_data(bmd);
547 return ERR_PTR(ret);
548 }
549
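/*
 * Example (illustrative sketch): the bounce-buffer path for user I/O.  How
 * the bio is submitted and waited for is driver specific and assumed here;
 * only the copy/uncopy pairing is shown.
 *
 *	bio = bio_copy_user(q, uaddr, len, reading);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	... submit the bio and wait for it to complete ...
 *
 *	ret = bio_uncopy_user(bio);
 *
 * For a read, bio_uncopy_user() copies the data back to user space before
 * freeing the bounce pages; for a write it only frees them.
 */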
550 static struct bio *__bio_map_user_iov(request_queue_t *q,
551 struct block_device *bdev,
552 struct sg_iovec *iov, int iov_count,
553 int write_to_vm)
554 {
555 int i, j;
556 int nr_pages = 0;
557 struct page **pages;
558 struct bio *bio;
559 int cur_page = 0;
560 int ret, offset;
561
562 for (i = 0; i < iov_count; i++) {
563 unsigned long uaddr = (unsigned long)iov[i].iov_base;
564 unsigned long len = iov[i].iov_len;
565 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
566 unsigned long start = uaddr >> PAGE_SHIFT;
567
568 nr_pages += end - start;
569 /*
570 * transfer and buffer must be aligned to at least hardsector
571 * size for now, in the future we can relax this restriction
572 */
573 if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
574 return ERR_PTR(-EINVAL);
575 }
576
577 if (!nr_pages)
578 return ERR_PTR(-EINVAL);
579
580 bio = bio_alloc(GFP_KERNEL, nr_pages);
581 if (!bio)
582 return ERR_PTR(-ENOMEM);
583
584 ret = -ENOMEM;
585 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
586 if (!pages)
587 goto out;
588
589 memset(pages, 0, nr_pages * sizeof(struct page *));
590
591 for (i = 0; i < iov_count; i++) {
592 unsigned long uaddr = (unsigned long)iov[i].iov_base;
593 unsigned long len = iov[i].iov_len;
594 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
595 unsigned long start = uaddr >> PAGE_SHIFT;
596 const int local_nr_pages = end - start;
597 const int page_limit = cur_page + local_nr_pages;
598
599 down_read(&current->mm->mmap_sem);
600 ret = get_user_pages(current, current->mm, uaddr,
601 local_nr_pages,
602 write_to_vm, 0, &pages[cur_page], NULL);
603 up_read(&current->mm->mmap_sem);
604
605 if (ret < local_nr_pages)
606 goto out_unmap;
607
608
609 offset = uaddr & ~PAGE_MASK;
610 for (j = cur_page; j < page_limit; j++) {
611 unsigned int bytes = PAGE_SIZE - offset;
612
613 if (len <= 0)
614 break;
615
616 if (bytes > len)
617 bytes = len;
618
619 /*
620 * sorry...
621 */
622 if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
623 break;
624
625 len -= bytes;
626 offset = 0;
627 }
628
629 cur_page = j;
630 /*
631 * release the pages we didn't map into the bio, if any
632 */
633 while (j < page_limit)
634 page_cache_release(pages[j++]);
635 }
636
637 kfree(pages);
638
639 /*
640 * set data direction, and check if mapped pages need bouncing
641 */
642 if (!write_to_vm)
643 bio->bi_rw |= (1 << BIO_RW);
644
645 bio->bi_bdev = bdev;
646 bio->bi_flags |= (1 << BIO_USER_MAPPED);
647 return bio;
648
649 out_unmap:
650 for (i = 0; i < nr_pages; i++) {
651 if (!pages[i])
652 break;
653 page_cache_release(pages[i]);
654 }
655 out:
656 kfree(pages);
657 bio_put(bio);
658 return ERR_PTR(ret);
659 }
660
661 /**
662 * bio_map_user - map user address into bio
663 * @q: the request_queue_t for the bio
664 * @bdev: destination block device
665 * @uaddr: start of user address
666 * @len: length in bytes
667 * @write_to_vm: bool indicating writing to pages or not
668 *
669 * Map the user space address into a bio suitable for io to a block
670 * device. Returns an error pointer in case of error.
671 */
672 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
673 unsigned long uaddr, unsigned int len, int write_to_vm)
674 {
675 struct sg_iovec iov;
676
677 iov.iov_base = (__user void *)uaddr;
678 iov.iov_len = len;
679
680 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
681 }
682
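/*
 * Example (illustrative sketch): zero-copy mapping of a user buffer, as
 * done for SG_IO style ioctls.  uaddr and len must satisfy the queue's dma
 * alignment, and the user pages stay pinned until bio_unmap_user() runs.
 * Submission and completion handling are assumed.
 *
 *	bio = bio_map_user(q, bdev, uaddr, len, reading);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	... submit the bio and wait for it to complete ...
 *
 *	bio_unmap_user(bio);
 */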
683 /**
684 * bio_map_user_iov - map user sg_iovec table into bio
685 * @q: the request_queue_t for the bio
686 * @bdev: destination block device
687 * @iov: the iovec.
688 * @iov_count: number of elements in the iovec
689 * @write_to_vm: bool indicating writing to pages or not
690 *
691 * Map the user space address into a bio suitable for io to a block
692 * device. Returns an error pointer in case of error.
693 */
694 struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
695 struct sg_iovec *iov, int iov_count,
696 int write_to_vm)
697 {
698 struct bio *bio;
699 int len = 0, i;
700
701 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
702
703 if (IS_ERR(bio))
704 return bio;
705
706 /*
707 * subtle -- if __bio_map_user_iov() ended up bouncing a bio,
708 * it would normally disappear when its bi_end_io is run.
709 * however, we need it for the unmap, so grab an extra
710 * reference to it
711 */
712 bio_get(bio);
713
714 for (i = 0; i < iov_count; i++)
715 len += iov[i].iov_len;
716
717 if (bio->bi_size == len)
718 return bio;
719
720 /*
721 * don't support partial mappings
722 */
723 bio_endio(bio, bio->bi_size, 0);
724 bio_unmap_user(bio);
725 return ERR_PTR(-EINVAL);
726 }
727
728 static void __bio_unmap_user(struct bio *bio)
729 {
730 struct bio_vec *bvec;
731 int i;
732
733 /*
734 * make sure we dirty pages we wrote to
735 */
736 __bio_for_each_segment(bvec, bio, i, 0) {
737 if (bio_data_dir(bio) == READ)
738 set_page_dirty_lock(bvec->bv_page);
739
740 page_cache_release(bvec->bv_page);
741 }
742
743 bio_put(bio);
744 }
745
746 /**
747 * bio_unmap_user - unmap a bio
748 * @bio: the bio being unmapped
749 *
750 * Unmap a bio previously mapped by bio_map_user(). Must be called from
751 * process context.
752 *
753 * bio_unmap_user() may sleep.
754 */
755 void bio_unmap_user(struct bio *bio)
756 {
757 __bio_unmap_user(bio);
758 bio_put(bio);
759 }
760
761 static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
762 {
763 if (bio->bi_size)
764 return 1;
765
766 bio_put(bio);
767 return 0;
768 }
769
770
771 static struct bio *__bio_map_kern(request_queue_t *q, void *data,
772 unsigned int len, unsigned int gfp_mask)
773 {
774 unsigned long kaddr = (unsigned long)data;
775 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
776 unsigned long start = kaddr >> PAGE_SHIFT;
777 const int nr_pages = end - start;
778 int offset, i;
779 struct bio *bio;
780
781 bio = bio_alloc(gfp_mask, nr_pages);
782 if (!bio)
783 return ERR_PTR(-ENOMEM);
784
785 offset = offset_in_page(kaddr);
786 for (i = 0; i < nr_pages; i++) {
787 unsigned int bytes = PAGE_SIZE - offset;
788
789 if (len <= 0)
790 break;
791
792 if (bytes > len)
793 bytes = len;
794
795 if (__bio_add_page(q, bio, virt_to_page(data), bytes,
796 offset) < bytes)
797 break;
798
799 data += bytes;
800 len -= bytes;
801 offset = 0;
802 }
803
804 bio->bi_end_io = bio_map_kern_endio;
805 return bio;
806 }
807
808 /**
809 * bio_map_kern - map kernel address into bio
810 * @q: the request_queue_t for the bio
811 * @data: pointer to buffer to map
812 * @len: length in bytes
813 * @gfp_mask: allocation flags for bio allocation
814 *
815 * Map the kernel address into a bio suitable for io to a block
816 * device. Returns an error pointer in case of error.
817 */
818 struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
819 unsigned int gfp_mask)
820 {
821 struct bio *bio;
822
823 bio = __bio_map_kern(q, data, len, gfp_mask);
824 if (IS_ERR(bio))
825 return bio;
826
827 if (bio->bi_size == len)
828 return bio;
829
830 /*
831 * Don't support partial mappings.
832 */
833 bio_put(bio);
834 return ERR_PTR(-EINVAL);
835 }
836
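/*
 * Example (illustrative sketch): mapping a driver-internal kernel buffer.
 * The buffer must be addressable with virt_to_page() (so not vmalloc()ed),
 * since __bio_map_kern() walks it page by page.  Device, sector and
 * direction below are assumptions.
 *
 *	bio = bio_map_kern(q, buffer, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	submit_bio(rw, bio);
 */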
837 /*
838 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
839 * for performing direct-IO in BIOs.
840 *
841 * The problem is that we cannot run set_page_dirty() from interrupt context
842 * because the required locks are not interrupt-safe. So what we can do is to
843 * mark the pages dirty _before_ performing IO. And in interrupt context,
844 * check that the pages are still dirty. If so, fine. If not, redirty them
845 * in process context.
846 *
847 * We special-case compound pages here: normally this means reads into hugetlb
848 * pages. The logic in here doesn't really work right for compound pages
849 * because the VM does not uniformly chase down the head page in all cases.
850 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
851 * handle them at all. So we skip compound pages here at an early stage.
852 *
853 * Note that this code is very hard to test under normal circumstances because
854 * direct-io pins the pages with get_user_pages(). This makes
855 * is_page_cache_freeable return false, and the VM will not clean the pages.
856 * But other code (eg, pdflush) could clean the pages if they are mapped
857 * pagecache.
858 *
859 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
860 * deferred bio dirtying paths.
861 */
862
863 /*
864 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
865 */
866 void bio_set_pages_dirty(struct bio *bio)
867 {
868 struct bio_vec *bvec = bio->bi_io_vec;
869 int i;
870
871 for (i = 0; i < bio->bi_vcnt; i++) {
872 struct page *page = bvec[i].bv_page;
873
874 if (page && !PageCompound(page))
875 set_page_dirty_lock(page);
876 }
877 }
878
879 static void bio_release_pages(struct bio *bio)
880 {
881 struct bio_vec *bvec = bio->bi_io_vec;
882 int i;
883
884 for (i = 0; i < bio->bi_vcnt; i++) {
885 struct page *page = bvec[i].bv_page;
886
887 if (page)
888 put_page(page);
889 }
890 }
891
892 /*
893 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
894 * If they are, then fine. If, however, some pages are clean then they must
895 * have been written out during the direct-IO read. So we take another ref on
896 * the BIO and the offending pages and re-dirty the pages in process context.
897 *
898 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
899 * here on. It will run one page_cache_release() against each page and will
900 * run one bio_put() against the BIO.
901 */
902
903 static void bio_dirty_fn(void *data);
904
905 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
906 static DEFINE_SPINLOCK(bio_dirty_lock);
907 static struct bio *bio_dirty_list;
908
909 /*
910 * This runs in process context
911 */
912 static void bio_dirty_fn(void *data)
913 {
914 unsigned long flags;
915 struct bio *bio;
916
917 spin_lock_irqsave(&bio_dirty_lock, flags);
918 bio = bio_dirty_list;
919 bio_dirty_list = NULL;
920 spin_unlock_irqrestore(&bio_dirty_lock, flags);
921
922 while (bio) {
923 struct bio *next = bio->bi_private;
924
925 bio_set_pages_dirty(bio);
926 bio_release_pages(bio);
927 bio_put(bio);
928 bio = next;
929 }
930 }
931
932 void bio_check_pages_dirty(struct bio *bio)
933 {
934 struct bio_vec *bvec = bio->bi_io_vec;
935 int nr_clean_pages = 0;
936 int i;
937
938 for (i = 0; i < bio->bi_vcnt; i++) {
939 struct page *page = bvec[i].bv_page;
940
941 if (PageDirty(page) || PageCompound(page)) {
942 page_cache_release(page);
943 bvec[i].bv_page = NULL;
944 } else {
945 nr_clean_pages++;
946 }
947 }
948
949 if (nr_clean_pages) {
950 unsigned long flags;
951
952 spin_lock_irqsave(&bio_dirty_lock, flags);
953 bio->bi_private = bio_dirty_list;
954 bio_dirty_list = bio;
955 spin_unlock_irqrestore(&bio_dirty_lock, flags);
956 schedule_work(&bio_dirty_work);
957 } else {
958 bio_put(bio);
959 }
960 }
961
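/*
 * Example (illustrative sketch): how a direct-IO read path would typically
 * use the two helpers above.
 *
 *	bio_set_pages_dirty(bio);	before submission, process context
 *	submit_bio(READ, bio);
 *	...
 *	and from the bi_end_io handler:
 *	bio_check_pages_dirty(bio);
 *
 * bio_check_pages_dirty() then owns the bio: it re-dirties any pages that
 * were cleaned in the meantime (deferring to a workqueue, since this runs
 * in interrupt context) and drops the page and bio references.
 */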
962 /**
963 * bio_endio - end I/O on a bio
964 * @bio: bio
965 * @bytes_done: number of bytes completed
966 * @error: error, if any
967 *
968 * Description:
969 * bio_endio() will end I/O on @bytes_done number of bytes. This may be
970 * just a partial part of the bio, or it may be the whole bio. bio_endio()
971 * is the preferred way to end I/O on a bio, it takes care of decrementing
972 * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success
973 * and one of the established -Exxxx (-EIO, for instance) error values in
974 * case something went wrong. No one should call bi_end_io() directly on
975 * a bio unless they own it and thus know that it has an end_io function.
976 **/
977 void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
978 {
979 if (error)
980 clear_bit(BIO_UPTODATE, &bio->bi_flags);
981
982 if (unlikely(bytes_done > bio->bi_size)) {
983 printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
984 bytes_done, bio->bi_size);
985 bytes_done = bio->bi_size;
986 }
987
988 bio->bi_size -= bytes_done;
989 bio->bi_sector += (bytes_done >> 9);
990
991 if (bio->bi_end_io)
992 bio->bi_end_io(bio, bytes_done, error);
993 }
994
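/*
 * Example (illustrative sketch): a minimal bi_end_io handler following the
 * convention used throughout this file - partial completions are ignored
 * until bi_size has dropped to zero.  Storing a struct completion in
 * bi_private is an assumption of this sketch.
 *
 *	static int my_end_io(struct bio *bio, unsigned int bytes_done, int err)
 *	{
 *		if (bio->bi_size)
 *			return 1;
 *
 *		complete((struct completion *) bio->bi_private);
 *		return 0;
 *	}
 */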
995 void bio_pair_release(struct bio_pair *bp)
996 {
997 if (atomic_dec_and_test(&bp->cnt)) {
998 struct bio *master = bp->bio1.bi_private;
999
1000 bio_endio(master, master->bi_size, bp->error);
1001 mempool_free(bp, bp->bio2.bi_private);
1002 }
1003 }
1004
1005 static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
1006 {
1007 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
1008
1009 if (err)
1010 bp->error = err;
1011
1012 if (bi->bi_size)
1013 return 1;
1014
1015 bio_pair_release(bp);
1016 return 0;
1017 }
1018
1019 static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
1020 {
1021 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
1022
1023 if (err)
1024 bp->error = err;
1025
1026 if (bi->bi_size)
1027 return 1;
1028
1029 bio_pair_release(bp);
1030 return 0;
1031 }
1032
1033 /*
1034 * split a bio - only worry about a bio with a single page
1035 * in its iovec
1036 */
1037 struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
1038 {
1039 struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);
1040
1041 if (!bp)
1042 return bp;
1043
1044 BUG_ON(bi->bi_vcnt != 1);
1045 BUG_ON(bi->bi_idx != 0);
1046 atomic_set(&bp->cnt, 3);
1047 bp->error = 0;
1048 bp->bio1 = *bi;
1049 bp->bio2 = *bi;
1050 bp->bio2.bi_sector += first_sectors;
1051 bp->bio2.bi_size -= first_sectors << 9;
1052 bp->bio1.bi_size = first_sectors << 9;
1053
1054 bp->bv1 = bi->bi_io_vec[0];
1055 bp->bv2 = bi->bi_io_vec[0];
1056 bp->bv2.bv_offset += first_sectors << 9;
1057 bp->bv2.bv_len -= first_sectors << 9;
1058 bp->bv1.bv_len = first_sectors << 9;
1059
1060 bp->bio1.bi_io_vec = &bp->bv1;
1061 bp->bio2.bi_io_vec = &bp->bv2;
1062
1063 bp->bio1.bi_end_io = bio_pair_end_1;
1064 bp->bio2.bi_end_io = bio_pair_end_2;
1065
1066 bp->bio1.bi_private = bi;
1067 bp->bio2.bi_private = pool;
1068
1069 return bp;
1070 }
1071
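/*
 * Example (illustrative sketch): how a striping driver (md/raid0 style)
 * splits a single-page bio that straddles a chunk boundary.
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 *
 * The pair starts with a count of 3; the two end_io handlers and the
 * bio_pair_release() above each drop one, and the original bio is ended
 * only when both halves have completed.
 */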
1072 static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
1073 {
1074 return kmalloc(sizeof(struct bio_pair), gfp_flags);
1075 }
1076
1077 static void bio_pair_free(void *bp, void *data)
1078 {
1079 kfree(bp);
1080 }
1081
1082
1083 /*
1084 * create memory pools for biovec's in a bio_set.
1085 * use the global biovec slabs created for general use.
1086 */
1087 static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
1088 {
1089 int i;
1090
1091 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
1092 struct biovec_slab *bp = bvec_slabs + i;
1093 mempool_t **bvp = bs->bvec_pools + i;
1094
1095 if (i >= scale)
1096 pool_entries >>= 1;
1097
1098 *bvp = mempool_create(pool_entries, mempool_alloc_slab,
1099 mempool_free_slab, bp->slab);
1100 if (!*bvp)
1101 return -ENOMEM;
1102 }
1103 return 0;
1104 }
1105
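/*
 * Worked example (illustrative): init_bio() on a machine with roughly 96MB
 * of free memory passes pool_entries = 192 and scale = 3, so the loop above
 * creates pools of 192, 192, 192, 96, 48 and 24 entries for the six biovec
 * slabs, smallest to largest - the rarely used large-vec pools are scaled
 * down first.
 */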
1106 static void biovec_free_pools(struct bio_set *bs)
1107 {
1108 int i;
1109
1110 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
1111 mempool_t *bvp = bs->bvec_pools[i];
1112
1113 if (bvp)
1114 mempool_destroy(bvp);
1115 }
1116
1117 }
1118
1119 void bioset_free(struct bio_set *bs)
1120 {
1121 if (bs->bio_pool)
1122 mempool_destroy(bs->bio_pool);
1123
1124 biovec_free_pools(bs);
1125
1126 kfree(bs);
1127 }
1128
1129 struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
1130 {
1131 struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);
1132
1133 if (!bs)
1134 return NULL;
1135
1136 memset(bs, 0, sizeof(*bs));
1137 bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
1138 mempool_free_slab, bio_slab);
1139
1140 if (!bs->bio_pool)
1141 goto bad;
1142
1143 if (!biovec_create_pools(bs, bvec_pool_size, scale))
1144 return bs;
1145
1146 bad:
1147 bioset_free(bs);
1148 return NULL;
1149 }
1150
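/*
 * Example (illustrative sketch): a subsystem that must make forward
 * progress under memory pressure (dm and md do this) creates a private
 * bio_set at init time and allocates all of its bios from it.  The pool
 * sizes below are assumptions.
 *
 *	bs = bioset_create(32, 32, 4);
 *	if (!bs)
 *		return -ENOMEM;
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	...
 *	bioset_free(bs);
 */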
1151 static void __init biovec_init_slabs(void)
1152 {
1153 int i;
1154
1155 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
1156 int size;
1157 struct biovec_slab *bvs = bvec_slabs + i;
1158
1159 size = bvs->nr_vecs * sizeof(struct bio_vec);
1160 bvs->slab = kmem_cache_create(bvs->name, size, 0,
1161 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1162 }
1163 }
1164
1165 static int __init init_bio(void)
1166 {
1167 int megabytes, bvec_pool_entries;
1168 int scale = BIOVEC_NR_POOLS;
1169
1170 bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
1171 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1172
1173 biovec_init_slabs();
1174
1175 megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
1176
1177 /*
1178 * find out where to start scaling
1179 */
1180 if (megabytes <= 16)
1181 scale = 0;
1182 else if (megabytes <= 32)
1183 scale = 1;
1184 else if (megabytes <= 64)
1185 scale = 2;
1186 else if (megabytes <= 96)
1187 scale = 3;
1188 else if (megabytes <= 128)
1189 scale = 4;
1190
1191 /*
1192 * scale number of entries
1193 */
1194 bvec_pool_entries = megabytes * 2;
1195 if (bvec_pool_entries > 256)
1196 bvec_pool_entries = 256;
1197
1198 fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
1199 if (!fs_bio_set)
1200 panic("bio: can't allocate bios\n");
1201
1202 bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
1203 bio_pair_alloc, bio_pair_free, NULL);
1204 if (!bio_split_pool)
1205 panic("bio: can't create split pool\n");
1206
1207 return 0;
1208 }
1209
1210 subsys_initcall(init_bio);
1211
1212 EXPORT_SYMBOL(bio_alloc);
1213 EXPORT_SYMBOL(bio_put);
1214 EXPORT_SYMBOL(bio_endio);
1215 EXPORT_SYMBOL(bio_init);
1216 EXPORT_SYMBOL(__bio_clone);
1217 EXPORT_SYMBOL(bio_clone);
1218 EXPORT_SYMBOL(bio_phys_segments);
1219 EXPORT_SYMBOL(bio_hw_segments);
1220 EXPORT_SYMBOL(bio_add_page);
1221 EXPORT_SYMBOL(bio_get_nr_vecs);
1222 EXPORT_SYMBOL(bio_map_user);
1223 EXPORT_SYMBOL(bio_unmap_user);
1224 EXPORT_SYMBOL(bio_map_kern);
1225 EXPORT_SYMBOL(bio_pair_release);
1226 EXPORT_SYMBOL(bio_split);
1227 EXPORT_SYMBOL(bio_split_pool);
1228 EXPORT_SYMBOL(bio_copy_user);
1229 EXPORT_SYMBOL(bio_uncopy_user);
1230 EXPORT_SYMBOL(bioset_create);
1231 EXPORT_SYMBOL(bioset_free);
1232 EXPORT_SYMBOL(bio_alloc_bioset);