/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
        struct bio *p = bio->bi_private;

        /* Complete the original bio and drop the temporary clone. */
        bio_endio(p, error);
        bio_put(bio);
}

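/*
 * Hack for code that can't handle a bio that doesn't start at its first
 * bvec: if bi_idx has been advanced, copy the remaining segments into a
 * fresh bio and complete the original from bch_bi_idx_hack_endio() above.
 */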
static void bch_generic_make_request_hack(struct bio *bio)
{
        if (bio->bi_iter.bi_idx) {
                struct bio_vec bv;
                struct bvec_iter iter;
                struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));

                bio_for_each_segment(bv, bio, iter)
                        clone->bi_io_vec[clone->bi_vcnt++] = bv;

                clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
                clone->bi_bdev = bio->bi_bdev;
                clone->bi_rw = bio->bi_rw;
                clone->bi_vcnt = bio_segments(bio);
                clone->bi_iter.bi_size = bio->bi_iter.bi_size;

                clone->bi_private = bio;
                clone->bi_end_io = bch_bi_idx_hack_endio;

                bio = clone;
        }

        /*
         * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
         * bios might have had more than that (before we split them per device
         * limitations).
         *
         * To be taken out once immutable bvec stuff is in.
         */
        bio->bi_max_vecs = bio->bi_vcnt;

        generic_make_request(bio);
}

/**
 * bch_bio_split - split a bio
 * @bio:     bio to split
 * @sectors: number of sectors to split from the front of @bio
 * @gfp:     gfp mask
 * @bs:      bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
 * freed before the split.
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
                          gfp_t gfp, struct bio_set *bs)
{
        unsigned vcnt = 0, nbytes = sectors << 9;
        struct bio_vec bv;
        struct bvec_iter iter;
        struct bio *ret = NULL;

        BUG_ON(sectors <= 0);

        if (sectors >= bio_sectors(bio))
                return bio;

        if (bio->bi_rw & REQ_DISCARD) {
                ret = bio_alloc_bioset(gfp, 1, bs);
                if (!ret)
                        return NULL;
                goto out;
        }

        /* Count how many bvecs the front @sectors span. */
        bio_for_each_segment(bv, bio, iter) {
                vcnt++;

                if (nbytes <= bv.bv_len)
                        break;

                nbytes -= bv.bv_len;
        }

        ret = bio_alloc_bioset(gfp, vcnt, bs);
        if (!ret)
                return NULL;

        bio_for_each_segment(bv, bio, iter) {
                ret->bi_io_vec[ret->bi_vcnt++] = bv;

                if (ret->bi_vcnt == vcnt)
                        break;
        }

        /* The last bvec may be only partially included in the split. */
        ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
out:
        ret->bi_bdev = bio->bi_bdev;
        ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
        ret->bi_iter.bi_size = sectors << 9;
        ret->bi_rw = bio->bi_rw;

        if (bio_integrity(bio)) {
                if (bio_integrity_clone(ret, bio, gfp)) {
                        bio_put(ret);
                        return NULL;
                }

                bio_integrity_trim(ret, 0, bio_sectors(ret));
        }

        bio_advance(bio, ret->bi_iter.bi_size);

        return ret;
}

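/*
 * Upper bound, in sectors, on how much of @bio the underlying device will
 * take in a single bio: discards are capped by max_discard_sectors,
 * everything else by the queue's segment and sector limits and, if the
 * driver provides one, its merge_bvec_fn.
 */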
static unsigned bch_bio_max_sectors(struct bio *bio)
{
        unsigned ret = bio_sectors(bio);
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
                                      queue_max_segments(q));

        if (bio->bi_rw & REQ_DISCARD)
                return min(ret, q->limits.max_discard_sectors);

        if (bio_segments(bio) > max_segments ||
            q->merge_bvec_fn) {
                struct bio_vec bv;
                struct bvec_iter iter;
                unsigned seg = 0;

                ret = 0;

                bio_for_each_segment(bv, bio, iter) {
                        struct bvec_merge_data bvm = {
                                .bi_bdev   = bio->bi_bdev,
                                .bi_sector = bio->bi_iter.bi_sector,
                                .bi_size   = ret << 9,
                                .bi_rw     = bio->bi_rw,
                        };

                        if (seg == max_segments)
                                break;

                        if (q->merge_bvec_fn &&
                            q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
                                break;

                        seg++;
                        ret += bv.bv_len >> 9;
                }
        }

        ret = min(ret, queue_max_sectors(q));

        WARN_ON(!ret);
        ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

        return ret;
}

static void bch_bio_submit_split_done(struct closure *cl)
{
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        s->bio->bi_end_io = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
        bio_endio(s->bio, 0);

        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        if (error)
                clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

        bio_put(bio);
        closure_put(cl);
}

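/*
 * Submit @bio, splitting it as needed so each piece fits within
 * bch_bio_max_sectors(); the bio_split_hook's closure collects the split
 * completions before the original bi_end_io is restored and called.
 */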
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
        struct bio_split_hook *s;
        struct bio *n;

        if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
                goto submit;

        if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
                goto submit;

        s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
        closure_init(&s->cl, NULL);

        s->bio = bio;
        s->p = p;
        s->bi_end_io = bio->bi_end_io;
        s->bi_private = bio->bi_private;
        bio_get(bio);

        do {
                n = bch_bio_split(bio, bch_bio_max_sectors(bio),
                                  GFP_NOIO, s->p->bio_split);

                n->bi_end_io = bch_bio_submit_split_endio;
                n->bi_private = &s->cl;

                closure_get(&s->cl);
                bch_generic_make_request_hack(n);
        } while (n != bio);

        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
        bch_generic_make_request_hack(bio);
}

/* Bios with headers */

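/*
 * A bbio wraps a bio together with a bkey; the key's first pointer
 * supplies the device and sector offset when the bio is submitted (see
 * __bch_submit_bbio() below).
 */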
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio);
        bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
        bio->bi_max_vecs = bucket_pages(c);
        bio->bi_io_vec = bio->bi_inline_vecs;

        return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
        bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;

        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
        /*
         * The halflife of an error is:
         * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
         */

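        /*
         * That is, each time io_count climbs past error_decay ("refresh"),
         * io_errors is scaled by 127/128; an error's weight therefore halves
         * after log(1/2) / log(127/128) ~= 88 such refresh periods.
         */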
        if (ca->set->error_decay) {
                unsigned count = atomic_inc_return(&ca->io_count);

                while (count > ca->set->error_decay) {
                        unsigned errors;
                        unsigned old = count;
                        unsigned new = count - ca->set->error_decay;

                        /*
                         * First we subtract refresh from count; each time we
                         * successfully do so, we rescale the errors once:
                         */

                        count = atomic_cmpxchg(&ca->io_count, old, new);

                        if (count == old) {
                                count = new;

                                errors = atomic_read(&ca->io_errors);
                                do {
                                        old = errors;
                                        new = ((uint64_t) errors * 127) / 128;
                                        errors = atomic_cmpxchg(&ca->io_errors,
                                                                old, new);
                                } while (old != errors);
                        }
                }
        }

        if (error) {
                char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s, recovering",
                               bdevname(ca->bdev, buf), m);
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
                                            bdevname(ca->bdev, buf), m);
        }
}

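/*
 * Besides counting errors, track device congestion: an IO that completes
 * slower than the read/write threshold drives c->congested negative by
 * roughly its latency in ms, and fast IOs step it back towards zero.
 */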
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              int error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);

        unsigned threshold = bio->bi_rw & REQ_WRITE
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();

                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        int ms = us / 1024;
                        c->congested_last_us = t;

                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    int error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);
        bio_endio(bio, error);
        closure_put(cl);
}