/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
	struct bio *p = bio->bi_private;

	bio_endio(p, error);
	bio_put(bio);
}

static void bch_generic_make_request_hack(struct bio *bio)
{
	if (bio->bi_idx) {
		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));

		memcpy(clone->bi_io_vec,
		       bio_iovec(bio),
		       bio_segments(bio) * sizeof(struct bio_vec));

		clone->bi_sector = bio->bi_sector;
		clone->bi_bdev = bio->bi_bdev;
		clone->bi_rw = bio->bi_rw;
		clone->bi_vcnt = bio_segments(bio);
		clone->bi_size = bio->bi_size;

		clone->bi_private = bio;
		clone->bi_end_io = bch_bi_idx_hack_endio;

		bio = clone;
	}

	/*
	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
	 * bios might have had more than that (before we split them per device
	 * limitations).
	 *
	 * To be taken out once immutable bvec stuff is in.
	 */
	bio->bi_max_vecs = bio->bi_vcnt;

	generic_make_request(bio);
}

/**
 * bch_bio_split - split a bio
 * @bio: bio to split
 * @sectors: number of sectors to split from the front of @bio
 * @gfp: gfp mask
 * @bs: bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
 * freed before the split.
 *
 * If bch_bio_split() is running under generic_make_request(), it's not safe to
 * allocate more than one bio from the same bio set. Therefore, if it is running
 * under generic_make_request() it masks out __GFP_WAIT when doing the
 * allocation. The caller must check for failure if there's any possibility of
 * it being called from under generic_make_request(); it is then the caller's
 * responsibility to retry from a safe context (by e.g. punting to workqueue).
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
{
	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
	struct bio_vec *bv;
	struct bio *ret = NULL;

	BUG_ON(sectors <= 0);

	/*
	 * If we're being called from underneath generic_make_request() and we
	 * already allocated any bios from this bio set, we risk deadlock if we
	 * use the mempool. So instead, we possibly fail and let the caller punt
	 * to workqueue or somesuch and retry in a safe context.
	 */
	if (current->bio_list)
		gfp &= ~__GFP_WAIT;

	if (sectors >= bio_sectors(bio))
		return bio;

	if (bio->bi_rw & REQ_DISCARD) {
		ret = bio_alloc_bioset(gfp, 1, bs);
		if (!ret)
			return NULL;
		idx = 0;
		goto out;
	}

	bio_for_each_segment(bv, bio, idx) {
		vcnt = idx - bio->bi_idx;

		if (!nbytes) {
			ret = bio_alloc_bioset(gfp, vcnt, bs);
			if (!ret)
				return NULL;

			memcpy(ret->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * vcnt);

			break;
		} else if (nbytes < bv->bv_len) {
			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
			if (!ret)
				return NULL;

			memcpy(ret->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * vcnt);

			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
			bv->bv_offset += nbytes;
			bv->bv_len -= nbytes;
			break;
		}

		nbytes -= bv->bv_len;
	}
out:
	ret->bi_bdev = bio->bi_bdev;
	ret->bi_sector = bio->bi_sector;
	ret->bi_size = sectors << 9;
	ret->bi_rw = bio->bi_rw;
	ret->bi_vcnt = vcnt;
	ret->bi_max_vecs = vcnt;

	bio->bi_sector += sectors;
	bio->bi_size -= sectors << 9;
	bio->bi_idx = idx;

	if (bio_integrity(bio)) {
		if (bio_integrity_clone(ret, bio, gfp)) {
			bio_put(ret);
			return NULL;
		}

		bio_integrity_trim(ret, 0, bio_sectors(ret));
		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
	}

	return ret;
}
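
/*
 * A minimal sketch of the retry contract described above: a hypothetical
 * caller that may be running under generic_make_request().  The names
 * example_split_state, example_split_workfn() and example_split() are made
 * up; only bch_bio_split() itself comes from this file.  If the allocation
 * fails (which can only happen under generic_make_request(), where
 * __GFP_WAIT has been masked out), the caller punts to a workqueue and
 * retries from process context, where the mempool allocation may block.
 */
struct example_split_state {
	struct work_struct	work;
	struct bio		*bio;
	unsigned		sectors;
	struct bio_set		*bs;
};

static void example_split(struct example_split_state *s);

static void example_split_workfn(struct work_struct *work)
{
	example_split(container_of(work, struct example_split_state, work));
}

static void example_split(struct example_split_state *s)
{
	struct bio *n = bch_bio_split(s->bio, s->sectors, GFP_NOIO, s->bs);

	if (!n) {
		/* bio_set busy under generic_make_request(): retry later */
		INIT_WORK(&s->work, example_split_workfn);
		schedule_work(&s->work);
		return;
	}

	/*
	 * n is the front s->sectors of the bio; s->bio is the remainder
	 * (or n == s->bio if it was already small enough).  Set
	 * n->bi_end_io and submit both, as __bch_bio_submit_split() does
	 * below.
	 */
}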

static unsigned bch_bio_max_sectors(struct bio *bio)
{
	unsigned ret = bio_sectors(bio);
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
				      queue_max_segments(q));
	struct bio_vec *bv, *end = bio_iovec(bio) +
		min_t(int, bio_segments(bio), max_segments);

	if (bio->bi_rw & REQ_DISCARD)
		return min(ret, q->limits.max_discard_sectors);

	if (bio_segments(bio) > max_segments ||
	    q->merge_bvec_fn) {
		ret = 0;

		for (bv = bio_iovec(bio); bv < end; bv++) {
			struct bvec_merge_data bvm = {
				.bi_bdev = bio->bi_bdev,
				.bi_sector = bio->bi_sector,
				.bi_size = ret << 9,
				.bi_rw = bio->bi_rw,
			};

			if (q->merge_bvec_fn &&
			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
				break;

			ret += bv->bv_len >> 9;
		}
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);

	return ret;
}
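
/*
 * Worked example of the sizing above (hypothetical numbers): a 1 MiB bio
 * built from 256 contiguous 4 KiB segments, aimed at a queue with
 * queue_max_segments() == 128, no merge_bvec_fn, and
 * queue_max_sectors() == 255.  bio_segments() exceeds max_segments, so the
 * bvec walk accumulates the first 128 segments (1024 sectors); the
 * queue_max_sectors() clamp then reduces that to 255, and the final
 * max_t() only matters when the clamp would fall below the first bvec
 * (8 sectors here), which guarantees forward progress on huge bvecs.
 */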

static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io = s->bi_end_io;
	s->bio->bi_private = s->bi_private;
	bio_endio(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

static void __bch_bio_submit_split(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
	struct bio *bio = s->bio, *n;

	do {
		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
				  GFP_NOIO, s->p->bio_split);
		if (!n)
			continue_at(cl, __bch_bio_submit_split, system_wq);

		n->bi_end_io = bch_bio_submit_split_endio;
		n->bi_private = cl;

		closure_get(cl);
		bch_generic_make_request_hack(n);
	} while (n != bio);

	continue_at(cl, bch_bio_submit_split_done, NULL);
}

void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);

	s->bio = bio;
	s->p = p;
	s->bi_end_io = bio->bi_end_io;
	s->bi_private = bio->bi_private;
	bio_get(bio);

	closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
	return;
submit:
	bch_generic_make_request_hack(bio);
}
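
/*
 * Hypothetical caller sketch for bch_generic_make_request(); the names
 * example_write_endio() and example_submit() are made up, and @p stands in
 * for the bio_split_pool that the real driver embeds in struct cache and
 * struct cached_dev.  Whether or not the bio gets split, the caller's
 * bi_end_io runs exactly once: either directly on the unsplit bio, or from
 * bch_bio_submit_split_done() after every fragment has completed, with
 * BIO_UPTODATE cleared on the original bio if any fragment failed.
 */
static void example_write_endio(struct bio *bio, int error)
{
	/* Runs once for the whole request, split or not; drop the
	 * submitter's reference. */
	bio_put(bio);
}

static void example_submit(struct bio *bio, struct bio_split_pool *p)
{
	bio->bi_end_io = example_write_endio;

	bch_generic_make_request(bio, p);
}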

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs = bucket_pages(c);
	bio->bi_io_vec = bio->bi_inline_vecs;

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_sector = PTR_OFFSET(&b->key, 0);
	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
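
/*
 * Illustrative sketch of the bbio path, loosely modeled on the btree read
 * code.  struct example_op, example_read_endio() and example_read_ptr()
 * are hypothetical, and the sketch assumes op->cl has already been
 * initialised and holds a reference; bch_bio_map() lives in util.c, the
 * rest comes from this file.
 */
struct example_op {
	struct closure cl;
	struct cache_set *c;
};

static void example_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct example_op *op = container_of(cl, struct example_op, cl);

	/* Count the error against the right cache device, then drop the
	 * bio and the closure reference taken for this IO. */
	bch_bbio_endio(op->c, bio, error, "reading example data");
}

static void example_read_ptr(struct example_op *op, struct bkey *k, void *buf)
{
	struct bio *bio = bch_bbio_alloc(op->c);

	bio->bi_rw = READ;
	bio->bi_size = KEY_SIZE(k) << 9;
	bio->bi_end_io = example_read_endio;
	bio->bi_private = &op->cl;

	bch_bio_map(bio, buf);

	/* Copies pointer 0 of @k into the bbio's key; __bch_submit_bbio()
	 * then derives bi_sector and bi_bdev from it before submitting. */
	bch_submit_bbio(bio, op->c, k, 0);
}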

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
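
/*
 * Worked check of the "~= 88 * refresh" figure in bch_count_io_errors():
 * every time io_count exceeds error_decay, the error count is scaled by
 * 127/128, so after n such refresh intervals it is errors * (127/128)^n.
 * Solving (127/128)^n = 1/2 gives n = log(1/2) / log(127/128) ~= 88.4,
 * i.e. roughly 88 refresh intervals per halflife.
 */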

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
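
/*
 * Worked example of the congestion accounting above (hypothetical
 * numbers): with congested_read_threshold_us == 2000, a read that takes
 * 10000us drives c->congested about us / 1024 == 9 steps further below
 * zero (the divide by 1024 is a cheap approximation of microseconds to
 * milliseconds, clamped by CONGESTED_MAX), while completions that come in
 * under the threshold nudge the counter back toward zero one step at a
 * time whenever it is negative.
 */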

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}