/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/**
 * bch_bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on
 * a bvec boundary; it is the caller's responsibility to ensure that @bio is
 * not freed before the split.
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
{
	unsigned vcnt = 0, nbytes = sectors << 9;
	struct bio_vec bv;
	struct bvec_iter iter;
	struct bio *ret = NULL;

	BUG_ON(sectors <= 0);

	if (sectors >= bio_sectors(bio))
		return bio;

	if (bio->bi_rw & REQ_DISCARD) {
		ret = bio_alloc_bioset(gfp, 1, bs);
		if (!ret)
			return NULL;
		goto out;
	}

	/* Count how many bvecs cover the first @sectors worth of data */
	bio_for_each_segment(bv, bio, iter) {
		vcnt++;

		if (nbytes <= bv.bv_len)
			break;

		nbytes -= bv.bv_len;
	}

	ret = bio_alloc_bioset(gfp, vcnt, bs);
	if (!ret)
		return NULL;

	bio_for_each_segment(bv, bio, iter) {
		ret->bi_io_vec[ret->bi_vcnt++] = bv;

		if (ret->bi_vcnt == vcnt)
			break;
	}

	/* The last bvec may only be partially used by the split */
	ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
out:
	ret->bi_bdev		= bio->bi_bdev;
	ret->bi_iter.bi_sector	= bio->bi_iter.bi_sector;
	ret->bi_iter.bi_size	= sectors << 9;
	ret->bi_rw		= bio->bi_rw;

	if (bio_integrity(bio)) {
		if (bio_integrity_clone(ret, bio, gfp)) {
			bio_put(ret);
			return NULL;
		}

		bio_integrity_trim(ret, 0, bio_sectors(ret));
	}

	bio_advance(bio, ret->bi_iter.bi_size);

	return ret;
}
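
/*
 * Illustrative only: a caller that may need several splits typically loops
 * until bch_bio_split() hands back the original bio (meaning the remainder
 * fit), as bch_generic_make_request() does below.  submit_one() here is a
 * stand-in for whatever the caller does with each fragment, and max_sectors
 * for whatever per-request limit it is enforcing:
 *
 *	do {
 *		n = bch_bio_split(bio, max_sectors, GFP_NOIO, bs);
 *		submit_one(n);
 *	} while (n != bio);
 */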

static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	/* Never return less than one full bvec, so splitting makes progress */
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}

static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io	= s->bi_end_io;
	s->bio->bi_private	= s->bi_private;
	bio_endio_nodec(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
				  GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		generic_make_request(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	generic_make_request(bio);
}

/* Bios with headers */
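
/*
 * A struct bbio wraps a bio together with the bkey it is being submitted
 * for (see the container_of() uses below), so the submit and completion
 * paths can recover the target cache device and sector offset from the key.
 */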

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
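	/*
	 * Spelling that out: ln(1/2) / ln(127/128) = -0.6931 / -0.00784
	 * ~= 88.4, so the error count is halved after roughly 88 rescale
	 * steps, i.e. about 88 * error_decay ("refresh") IOs.
	 */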

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}