/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
	struct bio *p = bio->bi_private;

	bio_endio(p, error);
	bio_put(bio);
}

static void bch_generic_make_request_hack(struct bio *bio)
{
	if (bio->bi_iter.bi_idx) {
		struct bio_vec bv;
		struct bvec_iter iter;
		unsigned segs = bio_segments(bio);
		struct bio *clone = bio_alloc(GFP_NOIO, segs);

		bio_for_each_segment(bv, bio, iter)
			clone->bi_io_vec[clone->bi_vcnt++] = bv;

		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
		clone->bi_bdev		= bio->bi_bdev;
		clone->bi_rw		= bio->bi_rw;
		clone->bi_vcnt		= segs;
		clone->bi_iter.bi_size	= bio->bi_iter.bi_size;

		clone->bi_private	= bio;
		clone->bi_end_io	= bch_bi_idx_hack_endio;

		bio = clone;
	}

	/*
	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
	 * bios might have had more than that (before we split them per device
	 * limitations).
	 *
	 * To be taken out once immutable bvec stuff is in.
	 */
	bio->bi_max_vecs = bio->bi_vcnt;

	generic_make_request(bio);
}

/**
 * bch_bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
 * freed before the split.
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
{
	unsigned vcnt = 0, nbytes = sectors << 9;
	struct bio_vec bv;
	struct bvec_iter iter;
	struct bio *ret = NULL;

	BUG_ON(sectors <= 0);

	if (sectors >= bio_sectors(bio))
		return bio;

	if (bio->bi_rw & REQ_DISCARD) {
		ret = bio_alloc_bioset(gfp, 1, bs);
		if (!ret)
			return NULL;
		goto out;
	}

	bio_for_each_segment(bv, bio, iter) {
		vcnt++;

		if (nbytes <= bv.bv_len)
			break;

		nbytes -= bv.bv_len;
	}

	ret = bio_alloc_bioset(gfp, vcnt, bs);
	if (!ret)
		return NULL;

	bio_for_each_segment(bv, bio, iter) {
		ret->bi_io_vec[ret->bi_vcnt++] = bv;

		if (ret->bi_vcnt == vcnt)
			break;
	}

	ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
out:
	ret->bi_bdev		= bio->bi_bdev;
	ret->bi_iter.bi_sector	= bio->bi_iter.bi_sector;
	ret->bi_iter.bi_size	= sectors << 9;
	ret->bi_rw		= bio->bi_rw;

	if (bio_integrity(bio)) {
		if (bio_integrity_clone(ret, bio, gfp)) {
			bio_put(ret);
			return NULL;
		}

		bio_integrity_trim(ret, 0, bio_sectors(ret));
	}

	bio_advance(bio, ret->bi_iter.bi_size);

	return ret;
}

static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}

static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io	= s->bi_end_io;
	s->bio->bi_private	= s->bi_private;
	bio_endio(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
				  GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		bch_generic_make_request_hack(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	bch_generic_make_request_hack(bio);
}

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}