/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/**
 * bch_bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
 * freed before the split.
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
{
	unsigned vcnt = 0, nbytes = sectors << 9;
	struct bio_vec bv;
	struct bvec_iter iter;
	struct bio *ret = NULL;

	BUG_ON(sectors <= 0);

	if (sectors >= bio_sectors(bio))
		return bio;

	if (bio->bi_rw & REQ_DISCARD) {
		ret = bio_alloc_bioset(gfp, 1, bs);
		if (!ret)
			return NULL;
		goto out;
	}

	bio_for_each_segment(bv, bio, iter) {
		vcnt++;

		if (nbytes <= bv.bv_len)
			break;

		nbytes -= bv.bv_len;
	}

	ret = bio_alloc_bioset(gfp, vcnt, bs);
	if (!ret)
		return NULL;

	bio_for_each_segment(bv, bio, iter) {
		ret->bi_io_vec[ret->bi_vcnt++] = bv;

		if (ret->bi_vcnt == vcnt)
			break;
	}

	ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
out:
	ret->bi_bdev		= bio->bi_bdev;
	ret->bi_iter.bi_sector	= bio->bi_iter.bi_sector;
	ret->bi_iter.bi_size	= sectors << 9;
	ret->bi_rw		= bio->bi_rw;

	if (bio_integrity(bio)) {
		if (bio_integrity_clone(ret, bio, gfp)) {
			bio_put(ret);
			return NULL;
		}

		bio_integrity_trim(ret, 0, bio_sectors(ret));
	}

	bio_advance(bio, ret->bi_iter.bi_size);

	return ret;
}

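/*
 * Return how many sectors of @bio can go to the underlying device in a
 * single bio: for discards, the queue's discard limit; otherwise the
 * smaller of the queue's segment and max_sectors limits, honouring
 * ->merge_bvec_fn if the driver provides one. Always returns at least
 * the size of the first bvec, so forward progress is guaranteed.
 */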
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}

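/*
 * Completion path for a split bio: each fragment ends in
 * bch_bio_submit_split_endio(), dropping a ref on the parent's closure;
 * once all fragments have completed, the closure fires
 * bch_bio_submit_split_done(), which restores the original bio's
 * endio/private and ends it.
 */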
static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io	= s->bi_end_io;
	s->bio->bi_private	= s->bi_private;
	bio_endio_nodec(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

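/*
 * Submit @bio, first chopping it into fragments no bigger than
 * bch_bio_max_sectors() if necessary. The original endio/private are
 * stashed in a bio_split_hook so they can be restored once every
 * fragment has completed.
 */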
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
				  GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		generic_make_request(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	generic_make_request(bio);
}

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

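/*
 * Point the bbio at the device and offset named by the first pointer in
 * b->key, then submit it. bch_submit_bbio() does the same, but first
 * copies pointer @ptr of @k into the bbio's key.
 */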
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 *
	 * i.e. the error count is multiplied by 127/128 once per decay
	 * period ("refresh" = error_decay IOs), so it takes roughly 88
	 * periods to halve.
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract error_decay from count; each
			 * time we successfully do so, we rescale the
			 * errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

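/*
 * Error and latency accounting for a completed bbio. On top of the
 * per-device error counting above, this feeds the cache set's
 * congestion heuristic: an IO slower than the configured read/write
 * threshold drives c->congested negative in proportion to how late it
 * was, while IOs under the threshold slowly bring it back toward zero.
 */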
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

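/*
 * Common completion for bbios: account errors and latency, then drop
 * the bio's reference and put the closure stashed in bi_private.
 */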
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}