/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#define BIO_POOL_SIZE 256

static kmem_cache_t *bio_slab;

#define BIOVEC_NR_POOLS 6

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 8
mempool_t *bio_split_pool;

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
struct bio_set {
	mempool_t *bio_pool;
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
static struct bio_set *fs_bio_set;

static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr,
					    unsigned long *idx, struct bio_set *bs)
{
	struct bio_vec *bvl;
	struct biovec_slab *bp;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
		case   1        : *idx = 0; break;
		case   2 ...   4: *idx = 1; break;
		case   5 ...  16: *idx = 2; break;
		case  17 ...  64: *idx = 3; break;
		case  65 ... 128: *idx = 4; break;
		case 129 ... BIO_MAX_PAGES: *idx = 5; break;
		default:
			return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from
	 */
	bp = bvec_slabs + *idx;
	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
	if (bvl)
		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));

	return bvl;
}

/*
 * default destructor for a bio allocated with bio_alloc_bioset()
 */
static void bio_destructor(struct bio *bio)
{
	const int pool_idx = BIO_POOL_IDX(bio);
	struct bio_set *bs = bio->bi_set;

	BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);

	mempool_free(bio->bi_io_vec, bs->bvec_pools[pool_idx]);
	mempool_free(bio, bs->bio_pool);
}

inline void bio_init(struct bio *bio)
{
	bio->bi_flags = 1 << BIO_UPTODATE;
	bio->bi_phys_segments = 0;
	bio->bi_hw_segments = 0;
	bio->bi_hw_front_size = 0;
	bio->bi_hw_back_size = 0;
	bio->bi_max_vecs = 0;
	bio->bi_end_io = NULL;
	atomic_set(&bio->bi_cnt, 1);
	bio->bi_private = NULL;
}

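/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * driver that embeds a struct bio in its own object can initialise it with
 * bio_init() and supply the vector table itself.  The "pd" object and its
 * fields are hypothetical.  Note that bio_put() on such a bio ends up in
 * bi_destructor, so the owner must install one (or keep holding its
 * reference) before the last put.
 *
 *	bio_init(&pd->bio);
 *	pd->bio.bi_io_vec = &pd->vec;
 *	pd->bio.bi_max_vecs = 1;
 *	pd->bio.bi_destructor = pd_bio_destructor;
 */
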
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from
 *
 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
 *
 *   allocate bio and iovecs from the memory pools specified by the
 *   bio_set structure.
 **/
struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);

	if (likely(bio)) {
		struct bio_vec *bvl = NULL;

		bio_init(bio);
		if (likely(nr_iovecs)) {
			unsigned long idx;

			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
			if (unlikely(!bvl)) {
				mempool_free(bio, bs->bio_pool);
				bio = NULL;
				goto out;
			}
			bio->bi_flags |= idx << BIO_POOL_OFFSET;
			bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
		}
		bio->bi_io_vec = bvl;
		bio->bi_destructor = bio_destructor;
		bio->bi_set = bs;
	}
out:
	return bio;
}

struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

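/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * typical caller allocates a bio, points it at a device and sector, adds
 * its pages and submits it.  example_end_io() and my_cookie are
 * hypothetical; see bio_endio() below for the completion convention.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = example_end_io;
 *	bio->bi_private = my_cookie;
 *	submit_bio(READ, bio);
 */
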
void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		char *data = bvec_kmap_irq(bv, &flags);
		memset(data, 0, bv->bv_len);
		flush_dcache_page(bv->bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc or bio_get. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	if (atomic_dec_and_test(&bio->bi_cnt)) {
		bio->bi_destructor(bio);
	}
}

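/*
 * Refcounting sketch (editor's illustration, not part of the original
 * file): a caller that must look at the bio after its completion handler
 * may have run takes an extra reference with bio_get() and drops it with
 * bio_put() when done.  "event" is a completion signalled by the end_io
 * handler (not shown, hypothetical).
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	wait_for_completion(&event);
 *	if (!bio_flagged(bio, BIO_UPTODATE))
 *		ret = -EIO;
 *	bio_put(bio);
 */
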
inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}

inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_hw_segments;
}

/**
 * 	__bio_clone	-	clone a bio
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 * 	bio will be one.
 */
inline void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);

	memcpy(bio->bi_io_vec, bio_src->bi_io_vec, bio_src->bi_max_vecs * sizeof(struct bio_vec));

	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;

	/*
	 * notes -- maybe just leave bi_idx alone. assume identical mapping
	 * for the clone
	 */
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_size = bio_src->bi_size;
	bio_phys_segments(q, bio);
	bio_hw_segments(q, bio);
}

/**
 *	bio_clone	-	clone a bio
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *
 * 	Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
{
	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

	if (b)
		__bio_clone(b, bio);

	return b;
}

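/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * stacking driver typically clones the incoming bio, redirects the clone
 * and submits it, keeping the original for completion.  real_bdev,
 * remapped_sector and example_clone_end_io() are hypothetical.
 *
 *	struct bio *clone = bio_clone(bio, GFP_NOIO);
 *
 *	clone->bi_bdev = real_bdev;
 *	clone->bi_sector = remapped_sector;
 *	clone->bi_end_io = example_clone_end_io;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */
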
/**
 *	bio_get_nr_vecs		- return approx number of vecs
 *	@bdev:  I/O target
 *
 *	Return the approximate number of pages we can send to this target.
 *	There's no guarantee that you will be able to fit this number of pages
 *	into a bio, it does not account for dynamic restrictions that vary
 *	on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	request_queue_t *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages > q->max_phys_segments)
		nr_pages = q->max_phys_segments;
	if (nr_pages > q->max_hw_segments)
		nr_pages = q->max_hw_segments;

	return nr_pages;
}

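/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers use the returned value as an upper bound when sizing the bio,
 * then rely on bio_add_page() to enforce the real limits.
 *
 *	int nr = min(bio_get_nr_vecs(bdev), nr_pages_to_send);
 *	struct bio *bio = bio_alloc(GFP_NOIO, nr);
 */
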
static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	if (((bio->bi_size + len) >> 9) > q->max_sectors)
		return 0;

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */
	while (bio->bi_phys_segments >= q->max_phys_segments
	       || bio->bi_hw_segments >= q->max_hw_segments
	       || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {

		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, bio, bvec) < len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
	    BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_hw_segments++;
	bio->bi_size += len;
	return len;
}

/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block
 *	device limitations. The target block device must allow bio's
 *	smaller than PAGE_SIZE, so it is always possible to add a single
 *	page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
			      len, offset);
}

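/*
 * Usage sketch (editor's illustration, not part of the original file):
 * build a bio page by page, assuming a bio already allocated and an array
 * of pages, and stop as soon as bio_add_page() refuses one; whatever was
 * accepted so far is submitted and a new bio is started for the rest.
 *
 *	while (nr_pages) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
 *			break;
 *		i++;
 *		nr_pages--;
 *	}
 *	submit_bio(WRITE, bio);
 */
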
struct bio_map_data {
	struct bio_vec *iovecs;
	void __user *userptr;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
	kfree(bmd->iovecs);
	kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs)
{
	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);

	if (!bmd)
		return NULL;

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
	if (bmd->iovecs)
		return bmd;

	kfree(bmd);
	return NULL;
}

/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	const int read = bio_data_dir(bio) == READ;
	struct bio_vec *bvec;
	int i, ret = 0;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);
		unsigned int len = bmd->iovecs[i].bv_len;

		if (read && !ret && copy_to_user(bmd->userptr, addr, len))
			ret = -EFAULT;

		__free_page(bvec->bv_page);
		bmd->userptr += len;
	}
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
}

/**
 *	bio_copy_user	-	copy user data to bio
 *	@q: destination block queue
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
			  unsigned int len, int write_to_vm)
{
	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret;

	bmd = bio_alloc_map_data(end - start);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	bmd->userptr = (void __user *) uaddr;

	ret = -ENOMEM;
	bio = bio_alloc(GFP_KERNEL, end - start);
	if (!bio)
		goto out_bmd;

	bio->bi_rw |= (!write_to_vm << BIO_RW);

	ret = 0;
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
			ret = -EINVAL;
			break;
		}

		len -= bytes;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (!write_to_vm) {
		char __user *p = (char __user *) uaddr;

		/*
		 * for a write, copy in data to kernel pages
		 */
		ret = -EFAULT;
		bio_for_each_segment(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			if (copy_from_user(addr, p, bvec->bv_len))
				goto cleanup;
			p += bvec->bv_len;
		}
	}

	bio_set_map_data(bmd, bio);
	return bio;
cleanup:
	bio_for_each_segment(bvec, bio, i)
		__free_page(bvec->bv_page);

	bio_put(bio);
out_bmd:
	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}

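/*
 * Usage sketch (editor's illustration, not part of the original file):
 * bounce a user buffer through kernel pages for a read, wait for the I/O,
 * then let bio_uncopy_user() copy the data back and free the bounce pages.
 * example_wait_for_bio() is hypothetical.
 *
 *	struct bio *bio = bio_copy_user(q, uaddr, len, 1);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	submit_bio(READ, bio);
 *	example_wait_for_bio(bio);
 *	ret = bio_uncopy_user(bio);
 */
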
static struct bio *__bio_map_user_iov(request_queue_t *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm)
{
	int i, j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		nr_pages += end - start;
		/*
		 * transfer and buffer must be aligned to at least hardsector
		 * size for now, in the future we can relax this restriction
		 */
		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_alloc(GFP_KERNEL, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out;

	memset(pages, 0, nr_pages * sizeof(struct page *));

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(current, current->mm, uaddr,
				     local_nr_pages,
				     write_to_vm, 0, &pages[cur_page], NULL);
		up_read(&current->mm->mmap_sem);

		if (ret < local_nr_pages)
			goto out_unmap;

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= (1 << BIO_RW);

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;

 out_unmap:
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

/**
 *	bio_map_user	-	map user address into bio
 *	@q: the request_queue_t for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm)
{
	struct sg_iovec iov;

	iov.iov_base = (__user void *)uaddr;
	iov.iov_len = len;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
}

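/*
 * Usage sketch (editor's illustration, not part of the original file): map
 * a suitably aligned user buffer straight into a bio for zero-copy I/O and
 * unmap it once the request has completed.  The buffer must satisfy
 * queue_dma_alignment() or an error pointer is returned.
 * example_wait_for_bio() is hypothetical.
 *
 *	struct bio *bio = bio_map_user(q, bdev, uaddr, len, 1);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	bio->bi_sector = sector;
 *	submit_bio(READ, bio);
 *	example_wait_for_bio(bio);
 *	bio_unmap_user(bio);
 */
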
/**
 *	bio_map_user_iov - map user sg_iovec table into bio
 *	@q: the request_queue_t for the bio
 *	@bdev: destination block device
 *	@iov: the iovec
 *	@iov_count: number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
			     struct sg_iovec *iov, int iov_count,
			     int write_to_vm)
{
	struct bio *bio;
	int len = 0, i;

	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	for (i = 0; i < iov_count; i++)
		len += iov[i].iov_len;

	if (bio->bi_size == len)
		return bio;

	/*
	 * don't support partial mappings
	 */
	bio_endio(bio, bio->bi_size, 0);
	bio_unmap_user(bio);
	return ERR_PTR(-EINVAL);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user(). Must be called with
 *	a process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

static struct bio *__bio_map_kern(request_queue_t *q, void *data,
				  unsigned int len, unsigned int gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
				   offset) < bytes)
			break;

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the request_queue_t for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
			 unsigned int gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_kern(q, data, len, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (bio->bi_size == len)
		return bio;

	/*
	 * Don't support partial mappings.
	 */
	bio_put(bio);
	return ERR_PTR(-EINVAL);
}

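/*
 * Usage sketch (editor's illustration, not part of the original file): map
 * a kernel buffer, for instance a kmalloc'ed command payload, into a bio.
 * The bio must cover the whole length or an error pointer is returned.
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	submit_bio(WRITE, bio);
 */
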
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache pages.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(void *data);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(void *data)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int nr_clean_pages = 0;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec[i].bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_release_pages(bio);
		bio_put(bio);
	}
}

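/*
 * Usage sketch (editor's illustration, modelled on the direct-IO pairing
 * described in the comment above; not part of the original file): a
 * direct-IO read dirties the user pages before submission and re-checks
 * them at completion time.
 *
 *	submission (process context):
 *		bio_set_pages_dirty(bio);
 *		submit_bio(READ, bio);
 *
 *	completion (may be interrupt context):
 *		bio_check_pages_dirty(bio);	releases pages and the bio
 */
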
/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @bytes_done:	number of bytes completed
 * @error:	error, if any
 *
 *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
 *   just a partial part of the bio, or it may be the whole bio. bio_endio()
 *   is the preferred way to end I/O on a bio, it takes care of decrementing
 *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success and
 *   one of the established -Exxxx (-EIO, for instance) error values in
 *   case something went wrong. No one should call bi_end_io() directly on
 *   a bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);

	if (unlikely(bytes_done > bio->bi_size)) {
		printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
			bytes_done, bio->bi_size);
		bytes_done = bio->bi_size;
	}

	bio->bi_size -= bytes_done;
	bio->bi_sector += (bytes_done >> 9);

	if (bio->bi_end_io)
		bio->bi_end_io(bio, bytes_done, error);
}

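/*
 * Completion sketch (editor's illustration, not part of the original file):
 * the usual shape of a bi_end_io handler with this interface.  Returning 1
 * while bi_size is non-zero tells the caller the bio is only partially
 * complete.  Here bi_private is assumed to point at a struct completion set
 * up by the submitter.
 *
 *	static int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
 *	{
 *		if (bio->bi_size)
 *			return 1;
 *
 *		complete(bio->bi_private);
 *		return 0;
 *	}
 */
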
void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, master->bi_size, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}

static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (err)
		bp->error = err;

	if (bi->bi_size)
		return 1;

	bio_pair_release(bp);
	return 0;
}

static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (err)
		bp->error = err;

	if (bi->bi_size)
		return 1;

	bio_pair_release(bp);
	return 0;
}

/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);

	if (!bp)
		return bp;

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);
	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = pool;

	return bp;
}

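/*
 * Usage sketch (editor's illustration, modelled on how RAID striping code
 * uses this; not part of the original file): when a single-page bio
 * straddles a boundary at "split_sector", split it and submit both halves,
 * then drop the pair's reference.
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool,
 *					split_sector - bio->bi_sector);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */
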
static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
{
	return kmalloc(sizeof(struct bio_pair), gfp_flags);
}

static void bio_pair_free(void *bp, void *data)
{
	kfree(bp);
}

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		struct biovec_slab *bp = bvec_slabs + i;
		mempool_t **bvp = bs->bvec_pools + i;

		if (i >= scale)
			pool_entries >>= 1;

		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
					mempool_free_slab, bp->slab);
		if (!*bvp)
			return -ENOMEM;
	}
	return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		mempool_t *bvp = bs->bvec_pools[i];

		if (bvp)
			mempool_destroy(bvp);
	}
}

void bioset_free(struct bio_set *bs)
{
	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	biovec_free_pools(bs);
	kfree(bs);
}

struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
{
	struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);

	if (!bs)
		return NULL;

	memset(bs, 0, sizeof(*bs));
	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
			mempool_free_slab, bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (!biovec_create_pools(bs, bvec_pool_size, scale))
		return bs;

bad:
	bioset_free(bs);
	return NULL;
}

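/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * driver that must make forward progress under memory pressure creates its
 * own bio_set and allocates bios from it instead of the shared fs_bio_set.
 * The pool sizes shown are arbitrary.
 *
 *	struct bio_set *bs = bioset_create(32, 4, 0);
 *
 *	if (!bs)
 *		return -ENOMEM;
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *
 * and on teardown:
 *
 *	bioset_free(bs);
 */
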
static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	}
}

static int __init init_bio(void)
{
	int megabytes, bvec_pool_entries;
	int scale = BIOVEC_NR_POOLS;

	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	biovec_init_slabs();

	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);

	/*
	 * find out where to start scaling
	 */
	if (megabytes <= 16)
		scale = 0;
	else if (megabytes <= 32)
		scale = 1;
	else if (megabytes <= 64)
		scale = 2;
	else if (megabytes <= 96)
		scale = 3;
	else if (megabytes <= 128)
		scale = 4;

	/*
	 * scale number of entries
	 */
	bvec_pool_entries = megabytes * 2;
	if (bvec_pool_entries > 256)
		bvec_pool_entries = 256;

	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
				bio_pair_alloc, bio_pair_free, NULL);
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}

subsys_initcall(init_bio);

EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);
EXPORT_SYMBOL(bioset_free);
EXPORT_SYMBOL(bio_alloc_bioset);