Commit | Line | Data |
---|---|---|
cafe5635 KO |
1 | /* |
2 | * Main bcache entry point - handle a read or a write request and decide what to | |
3 | * do with it; the make_request functions are called by the block layer. | |
4 | * | |
5 | * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> | |
6 | * Copyright 2012 Google, Inc. | |
7 | */ | |
8 | ||
9 | #include "bcache.h" | |
10 | #include "btree.h" | |
11 | #include "debug.h" | |
12 | #include "request.h" | |
279afbad | 13 | #include "writeback.h" |
cafe5635 KO |
14 | |
15 | #include <linux/cgroup.h> | |
16 | #include <linux/module.h> | |
17 | #include <linux/hash.h> | |
18 | #include <linux/random.h> | |
19 | #include "blk-cgroup.h" | |
20 | ||
21 | #include <trace/events/bcache.h> | |
22 | ||
23 | #define CUTOFF_CACHE_ADD 95 | |
24 | #define CUTOFF_CACHE_READA 90 | |
cafe5635 KO |
25 | |
26 | struct kmem_cache *bch_search_cache; | |
27 | ||
28 | static void check_should_skip(struct cached_dev *, struct search *); | |
29 | ||
30 | /* Cgroup interface */ | |
31 | ||
32 | #ifdef CONFIG_CGROUP_BCACHE | |
33 | static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 }; | |
34 | ||
35 | static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup) | |
36 | { | |
37 | struct cgroup_subsys_state *css; | |
38 | return cgroup && | |
39 | (css = cgroup_subsys_state(cgroup, bcache_subsys_id)) | |
40 | ? container_of(css, struct bch_cgroup, css) | |
41 | : &bcache_default_cgroup; | |
42 | } | |
43 | ||
44 | struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio) | |
45 | { | |
46 | struct cgroup_subsys_state *css = bio->bi_css | |
47 | ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id) | |
48 | : task_subsys_state(current, bcache_subsys_id); | |
49 | ||
50 | return css | |
51 | ? container_of(css, struct bch_cgroup, css) | |
52 | : &bcache_default_cgroup; | |
53 | } | |
54 | ||
55 | static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft, | |
56 | struct file *file, | |
57 | char __user *buf, size_t nbytes, loff_t *ppos) | |
58 | { | |
59 | char tmp[1024]; | |
169ef1cf KO |
60 | int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes, |
61 | cgroup_to_bcache(cgrp)->cache_mode + 1); | |
cafe5635 KO |
62 | |
63 | if (len < 0) | |
64 | return len; | |
65 | ||
66 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | |
67 | } | |
68 | ||
69 | static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft, | |
70 | const char *buf) | |
71 | { | |
169ef1cf | 72 | int v = bch_read_string_list(buf, bch_cache_modes); |
cafe5635 KO |
73 | if (v < 0) |
74 | return v; | |
75 | ||
76 | cgroup_to_bcache(cgrp)->cache_mode = v - 1; | |
77 | return 0; | |
78 | } | |
79 | ||
80 | static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft) | |
81 | { | |
82 | return cgroup_to_bcache(cgrp)->verify; | |
83 | } | |
84 | ||
85 | static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val) | |
86 | { | |
87 | cgroup_to_bcache(cgrp)->verify = val; | |
88 | return 0; | |
89 | } | |
90 | ||
91 | static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft) | |
92 | { | |
93 | struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
94 | return atomic_read(&bcachecg->stats.cache_hits); | |
95 | } | |
96 | ||
97 | static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft) | |
98 | { | |
99 | struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
100 | return atomic_read(&bcachecg->stats.cache_misses); | |
101 | } | |
102 | ||
103 | static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp, | |
104 | struct cftype *cft) | |
105 | { | |
106 | struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
107 | return atomic_read(&bcachecg->stats.cache_bypass_hits); | |
108 | } | |
109 | ||
110 | static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp, | |
111 | struct cftype *cft) | |
112 | { | |
113 | struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
114 | return atomic_read(&bcachecg->stats.cache_bypass_misses); | |
115 | } | |
116 | ||
117 | static struct cftype bch_files[] = { | |
118 | { | |
119 | .name = "cache_mode", | |
120 | .read = cache_mode_read, | |
121 | .write_string = cache_mode_write, | |
122 | }, | |
123 | { | |
124 | .name = "verify", | |
125 | .read_u64 = bch_verify_read, | |
126 | .write_u64 = bch_verify_write, | |
127 | }, | |
128 | { | |
129 | .name = "cache_hits", | |
130 | .read_u64 = bch_cache_hits_read, | |
131 | }, | |
132 | { | |
133 | .name = "cache_misses", | |
134 | .read_u64 = bch_cache_misses_read, | |
135 | }, | |
136 | { | |
137 | .name = "cache_bypass_hits", | |
138 | .read_u64 = bch_cache_bypass_hits_read, | |
139 | }, | |
140 | { | |
141 | .name = "cache_bypass_misses", | |
142 | .read_u64 = bch_cache_bypass_misses_read, | |
143 | }, | |
144 | { } /* terminate */ | |
145 | }; | |
146 | ||
147 | static void init_bch_cgroup(struct bch_cgroup *cg) | |
148 | { | |
149 | cg->cache_mode = -1; | |
150 | } | |
151 | ||
152 | static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup) | |
153 | { | |
154 | struct bch_cgroup *cg; | |
155 | ||
156 | cg = kzalloc(sizeof(*cg), GFP_KERNEL); | |
157 | if (!cg) | |
158 | return ERR_PTR(-ENOMEM); | |
159 | init_bch_cgroup(cg); | |
160 | return &cg->css; | |
161 | } | |
162 | ||
163 | static void bcachecg_destroy(struct cgroup *cgroup) | |
164 | { | |
165 | struct bch_cgroup *cg = cgroup_to_bcache(cgroup); | |
166 | free_css_id(&bcache_subsys, &cg->css); | |
167 | kfree(cg); | |
168 | } | |
169 | ||
170 | struct cgroup_subsys bcache_subsys = { | |
171 | .create = bcachecg_create, | |
172 | .destroy = bcachecg_destroy, | |
173 | .subsys_id = bcache_subsys_id, | |
174 | .name = "bcache", | |
175 | .module = THIS_MODULE, | |
176 | }; | |
177 | EXPORT_SYMBOL_GPL(bcache_subsys); | |
178 | #endif | |
179 | ||
180 | static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) | |
181 | { | |
182 | #ifdef CONFIG_CGROUP_BCACHE | |
183 | int r = bch_bio_to_cgroup(bio)->cache_mode; | |
184 | if (r >= 0) | |
185 | return r; | |
186 | #endif | |
187 | return BDEV_CACHE_MODE(&dc->sb); | |
188 | } | |
189 | ||
190 | static bool verify(struct cached_dev *dc, struct bio *bio) | |
191 | { | |
192 | #ifdef CONFIG_CGROUP_BCACHE | |
193 | if (bch_bio_to_cgroup(bio)->verify) | |
194 | return true; | |
195 | #endif | |
196 | return dc->verify; | |
197 | } | |
198 | ||
199 | static void bio_csum(struct bio *bio, struct bkey *k) | |
200 | { | |
201 | struct bio_vec *bv; | |
202 | uint64_t csum = 0; | |
203 | int i; | |
204 | ||
205 | bio_for_each_segment(bv, bio, i) { | |
206 | void *d = kmap(bv->bv_page) + bv->bv_offset; | |
169ef1cf | 207 | csum = bch_crc64_update(csum, d, bv->bv_len); |
cafe5635 KO |
208 | kunmap(bv->bv_page); |
209 | } | |
210 | ||
211 | k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); | |
212 | } | |
213 | ||
214 | /* Insert data into cache */ | |
215 | ||
216 | static void bio_invalidate(struct closure *cl) | |
217 | { | |
218 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
219 | struct bio *bio = op->cache_bio; | |
220 | ||
221 | pr_debug("invalidating %i sectors from %llu", | |
222 | bio_sectors(bio), (uint64_t) bio->bi_sector); | |
223 | ||
224 | while (bio_sectors(bio)) { | |
225 | unsigned len = min(bio_sectors(bio), 1U << 14); | |
226 | ||
227 | if (bch_keylist_realloc(&op->keys, 0, op->c)) | |
228 | goto out; | |
229 | ||
230 | bio->bi_sector += len; | |
231 | bio->bi_size -= len << 9; | |
232 | ||
233 | bch_keylist_add(&op->keys, | |
234 | &KEY(op->inode, bio->bi_sector, len)); | |
235 | } | |
236 | ||
237 | op->insert_data_done = true; | |
238 | bio_put(bio); | |
239 | out: | |
240 | continue_at(cl, bch_journal, bcache_wq); | |
241 | } | |
242 | ||
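As a worked example of the loop above: chopping a 40000-sector invalidation into pieces of at most 1 << 14 sectors yields three zero-pointer keys, and each key's offset records the *end* of its piece, because bi_sector is advanced before KEY() is built. A plain-C sketch, with struct fake_key standing in for struct bkey:

```c
#include <stdio.h>

struct fake_key { unsigned long long inode, offset; unsigned size; };

int main(void)
{
	unsigned long long sector = 0;	/* bio->bi_sector */
	unsigned remaining = 40000;	/* bio_sectors(bio) */

	while (remaining) {
		unsigned len = remaining < (1U << 14) ? remaining : 1U << 14;
		struct fake_key k;

		sector += len;		/* bi_sector advances first... */
		remaining -= len;
		k = (struct fake_key){ .inode = 1, .offset = sector, .size = len };

		/* ...so each key's offset is the end of the chunk it covers */
		printf("KEY(inode=%llu, offset=%llu, len=%u), no pointers\n",
		       k.inode, k.offset, k.size);
	}
	return 0;
}
```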
243 | struct open_bucket { | |
244 | struct list_head list; | |
245 | struct task_struct *last; | |
246 | unsigned sectors_free; | |
247 | BKEY_PADDED(key); | |
248 | }; | |
249 | ||
250 | void bch_open_buckets_free(struct cache_set *c) | |
251 | { | |
252 | struct open_bucket *b; | |
253 | ||
254 | while (!list_empty(&c->data_buckets)) { | |
255 | b = list_first_entry(&c->data_buckets, | |
256 | struct open_bucket, list); | |
257 | list_del(&b->list); | |
258 | kfree(b); | |
259 | } | |
260 | } | |
261 | ||
262 | int bch_open_buckets_alloc(struct cache_set *c) | |
263 | { | |
264 | int i; | |
265 | ||
266 | spin_lock_init(&c->data_bucket_lock); | |
267 | ||
268 | for (i = 0; i < 6; i++) { | |
269 | struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); | |
270 | if (!b) | |
271 | return -ENOMEM; | |
272 | ||
273 | list_add(&b->list, &c->data_buckets); | |
274 | } | |
275 | ||
276 | return 0; | |
277 | } | |
278 | ||
279 | /* | |
280 | * We keep multiple buckets open for writes, and try to segregate different | |
281 | * write streams for better cache utilization: first we look for a bucket where | |
282 | * the last write to it was sequential with the current write, and failing that | |
283 | * we look for a bucket that was last used by the same task. | |
284 | * | |
285 | * The idea is that if you've got multiple tasks pulling data into the cache at the | |
286 | * same time, you'll get better cache utilization if you try to segregate their | |
287 | * data and preserve locality. | |
288 | * | |
289 | * For example, say you're starting Firefox at the same time you're copying a | |
290 | * bunch of files. Firefox will likely end up being fairly hot and stay in the | |
291 | * cache awhile, but the data you copied might not be; if you wrote all that | |
292 | * data to the same buckets it'd get invalidated at the same time. | |
293 | * | |
294 | * Both of those tasks will be doing fairly random IO so we can't rely on | |
295 | * detecting sequential IO to segregate their data, but going off of the task | |
296 | * should be a sane heuristic. | |
297 | */ | |
298 | static struct open_bucket *pick_data_bucket(struct cache_set *c, | |
299 | const struct bkey *search, | |
300 | struct task_struct *task, | |
301 | struct bkey *alloc) | |
302 | { | |
303 | struct open_bucket *ret, *ret_task = NULL; | |
304 | ||
305 | list_for_each_entry_reverse(ret, &c->data_buckets, list) | |
306 | if (!bkey_cmp(&ret->key, search)) | |
307 | goto found; | |
308 | else if (ret->last == task) | |
309 | ret_task = ret; | |
310 | ||
311 | ret = ret_task ?: list_first_entry(&c->data_buckets, | |
312 | struct open_bucket, list); | |
313 | found: | |
314 | if (!ret->sectors_free && KEY_PTRS(alloc)) { | |
315 | ret->sectors_free = c->sb.bucket_size; | |
316 | bkey_copy(&ret->key, alloc); | |
317 | bkey_init(alloc); | |
318 | } | |
319 | ||
320 | if (!ret->sectors_free) | |
321 | ret = NULL; | |
322 | ||
323 | return ret; | |
324 | } | |
325 | ||
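The comment above pick_data_bucket() describes a two-level heuristic: prefer a bucket whose last write ended where this one begins, then a bucket last used by the same task, then fall back to the least recently used bucket. A minimal user-space sketch of that idea, with simplified stand-in types rather than the kernel's open_bucket/list_head machinery:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_bucket {
	uint64_t	last_offset;	/* sector where the previous write ended */
	int		last_task;	/* id of the task that wrote it */
	unsigned	sectors_free;
};

#define NBUCKETS 6

static struct sketch_bucket *pick_bucket(struct sketch_bucket *b, size_t n,
					 uint64_t offset, int task)
{
	struct sketch_bucket *by_task = NULL;

	for (size_t i = 0; i < n; i++) {
		if (b[i].sectors_free && b[i].last_offset == offset)
			return &b[i];		/* sequential with the last write: best */
		if (b[i].sectors_free && b[i].last_task == task)
			by_task = &b[i];	/* same task wrote here last: second best */
	}

	if (by_task)
		return by_task;
	return b[0].sectors_free ? &b[0] : NULL; /* else oldest bucket, or allocate anew */
}

int main(void)
{
	struct sketch_bucket buckets[NBUCKETS] = {
		{ .last_offset = 1024, .last_task = 1, .sectors_free = 128 },
		{ .last_offset = 4096, .last_task = 2, .sectors_free = 128 },
	};
	/* task 1 writes at sector 4096: the sequential match beats the task match */
	struct sketch_bucket *b = pick_bucket(buckets, NBUCKETS, 4096, 1);

	printf("picked bucket last written by task %d\n", b ? b->last_task : -1);
	return 0;
}
```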
326 | /* | |
327 | * Allocates some space in the cache to write to, sets k to point to the newly | |
328 | * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the | |
329 | * end of the newly allocated space). | |
330 | * | |
331 | * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many | |
332 | * sectors were actually allocated. | |
333 | * | |
334 | * If s->writeback is true, will not fail. | |
335 | */ | |
336 | static bool bch_alloc_sectors(struct bkey *k, unsigned sectors, | |
337 | struct search *s) | |
338 | { | |
339 | struct cache_set *c = s->op.c; | |
340 | struct open_bucket *b; | |
341 | BKEY_PADDED(key) alloc; | |
342 | struct closure cl, *w = NULL; | |
343 | unsigned i; | |
344 | ||
345 | if (s->writeback) { | |
346 | closure_init_stack(&cl); | |
347 | w = &cl; | |
348 | } | |
349 | ||
350 | /* | |
351 | * We might have to allocate a new bucket, which we can't do with a | |
352 | * spinlock held. So if we have to allocate, we drop the lock, allocate | |
353 | * and then retry. KEY_PTRS() indicates whether alloc points to | |
354 | * allocated bucket(s). | |
355 | */ | |
356 | ||
357 | bkey_init(&alloc.key); | |
358 | spin_lock(&c->data_bucket_lock); | |
359 | ||
360 | while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) { | |
361 | unsigned watermark = s->op.write_prio | |
362 | ? WATERMARK_MOVINGGC | |
363 | : WATERMARK_NONE; | |
364 | ||
365 | spin_unlock(&c->data_bucket_lock); | |
366 | ||
367 | if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w)) | |
368 | return false; | |
369 | ||
370 | spin_lock(&c->data_bucket_lock); | |
371 | } | |
372 | ||
373 | /* | |
374 | * If we had to allocate, we might race and not need to allocate the | |
375 | * second time we call pick_data_bucket(). If we allocated a bucket but | |
376 | * didn't use it, drop the refcount bch_bucket_alloc_set() took: | |
377 | */ | |
378 | if (KEY_PTRS(&alloc.key)) | |
379 | __bkey_put(c, &alloc.key); | |
380 | ||
381 | for (i = 0; i < KEY_PTRS(&b->key); i++) | |
382 | EBUG_ON(ptr_stale(c, &b->key, i)); | |
383 | ||
384 | /* Set up the pointer to the space we're allocating: */ | |
385 | ||
386 | for (i = 0; i < KEY_PTRS(&b->key); i++) | |
387 | k->ptr[i] = b->key.ptr[i]; | |
388 | ||
389 | sectors = min(sectors, b->sectors_free); | |
390 | ||
391 | SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); | |
392 | SET_KEY_SIZE(k, sectors); | |
393 | SET_KEY_PTRS(k, KEY_PTRS(&b->key)); | |
394 | ||
395 | /* | |
396 | * Move b to the end of the lru, and keep track of what this bucket was | |
397 | * last used for: | |
398 | */ | |
399 | list_move_tail(&b->list, &c->data_buckets); | |
400 | bkey_copy_key(&b->key, k); | |
401 | b->last = s->task; | |
402 | ||
403 | b->sectors_free -= sectors; | |
404 | ||
405 | for (i = 0; i < KEY_PTRS(&b->key); i++) { | |
406 | SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); | |
407 | ||
408 | atomic_long_add(sectors, | |
409 | &PTR_CACHE(c, &b->key, i)->sectors_written); | |
410 | } | |
411 | ||
412 | if (b->sectors_free < c->sb.block_size) | |
413 | b->sectors_free = 0; | |
414 | ||
415 | /* | |
416 | * k takes refcounts on the buckets it points to until it's inserted | |
417 | * into the btree, but if we're done with this bucket we just transfer | |
418 | * the refcount taken when the bucket was allocated. | |
419 | */ | |
420 | if (b->sectors_free) | |
421 | for (i = 0; i < KEY_PTRS(&b->key); i++) | |
422 | atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); | |
423 | ||
424 | spin_unlock(&c->data_bucket_lock); | |
425 | return true; | |
426 | } | |
427 | ||
428 | static void bch_insert_data_error(struct closure *cl) | |
429 | { | |
430 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
431 | ||
432 | /* | |
433 | * Our data write just errored, which means we've got a bunch of keys to | |
434 | * insert that point to data that wasn't successfully written. | |
435 | * | |
436 | * We don't have to insert those keys but we still have to invalidate | |
437 | * that region of the cache - so, if we just strip off all the pointers | |
438 | * from the keys we'll accomplish just that. | |
439 | */ | |
440 | ||
441 | struct bkey *src = op->keys.bottom, *dst = op->keys.bottom; | |
442 | ||
443 | while (src != op->keys.top) { | |
444 | struct bkey *n = bkey_next(src); | |
445 | ||
446 | SET_KEY_PTRS(src, 0); | |
447 | bkey_copy(dst, src); | |
448 | ||
449 | dst = bkey_next(dst); | |
450 | src = n; | |
451 | } | |
452 | ||
453 | op->keys.top = dst; | |
454 | ||
455 | bch_journal(cl); | |
456 | } | |
457 | ||
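Stripping the pointers shrinks each key, which is why the loop above copies the surviving keys down over the gaps with separate src/dst cursors. A user-space sketch of that compaction pattern, modelling a key as one length word plus that many pointer words (an assumption for illustration, not the real bkey layout):

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* three "keys": a length word followed by that many pointer words */
	unsigned long long buf[] = { 2, 111, 222,  1, 333,  3, 444, 555, 666 };
	size_t top = sizeof(buf) / sizeof(buf[0]);
	size_t src = 0, dst = 0;

	while (src < top) {
		size_t next = src + 1 + buf[src];	/* bkey_next(src) */

		buf[dst] = 0;	/* SET_KEY_PTRS(src, 0) + bkey_copy(dst, src) */
		dst += 1;	/* the stripped key is now header-only */
		src = next;
	}

	printf("compacted %zu words down to %zu\n", top, dst);
	return 0;
}
```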
458 | static void bch_insert_data_endio(struct bio *bio, int error) | |
459 | { | |
460 | struct closure *cl = bio->bi_private; | |
461 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
462 | struct search *s = container_of(op, struct search, op); | |
463 | ||
464 | if (error) { | |
465 | /* TODO: We could try to recover from this. */ | |
466 | if (s->writeback) | |
467 | s->error = error; | |
468 | else if (s->write) | |
469 | set_closure_fn(cl, bch_insert_data_error, bcache_wq); | |
470 | else | |
471 | set_closure_fn(cl, NULL, NULL); | |
472 | } | |
473 | ||
474 | bch_bbio_endio(op->c, bio, error, "writing data to cache"); | |
475 | } | |
476 | ||
477 | static void bch_insert_data_loop(struct closure *cl) | |
478 | { | |
479 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
480 | struct search *s = container_of(op, struct search, op); | |
481 | struct bio *bio = op->cache_bio, *n; | |
482 | ||
483 | if (op->skip) | |
484 | return bio_invalidate(cl); | |
485 | ||
486 | if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { | |
487 | set_gc_sectors(op->c); | |
488 | bch_queue_gc(op->c); | |
489 | } | |
490 | ||
491 | do { | |
492 | unsigned i; | |
493 | struct bkey *k; | |
494 | struct bio_set *split = s->d | |
495 | ? s->d->bio_split : op->c->bio_split; | |
496 | ||
497 | /* 1 for the device pointer and 1 for the checksum */ | |
498 | if (bch_keylist_realloc(&op->keys, | |
499 | 1 + (op->csum ? 1 : 0), | |
500 | op->c)) | |
501 | continue_at(cl, bch_journal, bcache_wq); | |
502 | ||
503 | k = op->keys.top; | |
504 | bkey_init(k); | |
505 | SET_KEY_INODE(k, op->inode); | |
506 | SET_KEY_OFFSET(k, bio->bi_sector); | |
507 | ||
508 | if (!bch_alloc_sectors(k, bio_sectors(bio), s)) | |
509 | goto err; | |
510 | ||
511 | n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); | |
512 | if (!n) { | |
513 | __bkey_put(op->c, k); | |
514 | continue_at(cl, bch_insert_data_loop, bcache_wq); | |
515 | } | |
516 | ||
517 | n->bi_end_io = bch_insert_data_endio; | |
518 | n->bi_private = cl; | |
519 | ||
520 | if (s->writeback) { | |
521 | SET_KEY_DIRTY(k, true); | |
522 | ||
523 | for (i = 0; i < KEY_PTRS(k); i++) | |
524 | SET_GC_MARK(PTR_BUCKET(op->c, k, i), | |
525 | GC_MARK_DIRTY); | |
526 | } | |
527 | ||
528 | SET_KEY_CSUM(k, op->csum); | |
529 | if (KEY_CSUM(k)) | |
530 | bio_csum(n, k); | |
531 | ||
c37511b8 | 532 | trace_bcache_cache_insert(k); |
cafe5635 KO |
533 | bch_keylist_push(&op->keys); |
534 | ||
cafe5635 KO |
535 | n->bi_rw |= REQ_WRITE; |
536 | bch_submit_bbio(n, op->c, k, 0); | |
537 | } while (n != bio); | |
538 | ||
539 | op->insert_data_done = true; | |
540 | continue_at(cl, bch_journal, bcache_wq); | |
541 | err: | |
542 | /* bch_alloc_sectors() blocks if s->writeback = true */ | |
543 | BUG_ON(s->writeback); | |
544 | ||
545 | /* | |
546 | * But if it's not a writeback write we'd rather just bail out if | |
547 | * there aren't any buckets ready to write to - it might take awhile and | |
548 | * we might be starving btree writes for gc or something. | |
549 | */ | |
550 | ||
551 | if (s->write) { | |
552 | /* | |
553 | * Writethrough write: We can't complete the write until we've | |
554 | * updated the index. But we don't want to delay the write while | |
555 | * we wait for buckets to be freed up, so just invalidate the | |
556 | * rest of the write. | |
557 | */ | |
558 | op->skip = true; | |
559 | return bio_invalidate(cl); | |
560 | } else { | |
561 | /* | |
562 | * From a cache miss, we can just insert the keys for the data | |
563 | * we have written or bail out if we didn't do anything. | |
564 | */ | |
565 | op->insert_data_done = true; | |
566 | bio_put(bio); | |
567 | ||
568 | if (!bch_keylist_empty(&op->keys)) | |
569 | continue_at(cl, bch_journal, bcache_wq); | |
570 | else | |
571 | closure_return(cl); | |
572 | } | |
573 | } | |
574 | ||
575 | /** | |
576 | * bch_insert_data - stick some data in the cache | |
577 | * | |
578 | * This is the starting point for any data to end up in a cache device; it could | |
579 | * be from a normal write, or a writeback write, or a write to a flash only | |
580 | * volume - it's also used by the moving garbage collector to compact data in | |
581 | * mostly empty buckets. | |
582 | * | |
583 | * It first writes the data to the cache, creating a list of keys to be inserted | |
584 | * (if the data had to be fragmented there will be multiple keys); after the | |
585 | * data is written it calls bch_journal, and after the keys have been added to | |
586 | * the next journal write they're inserted into the btree. | |
587 | * | |
588 | * It inserts the data in op->cache_bio; bi_sector is used for the key offset, | |
589 | * and op->inode is used for the key inode. | |
590 | * | |
591 | * If op->skip is true, instead of inserting the data it invalidates the region | |
592 | * of the cache represented by op->cache_bio and op->inode. | |
593 | */ | |
594 | void bch_insert_data(struct closure *cl) | |
595 | { | |
596 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
597 | ||
598 | bch_keylist_init(&op->keys); | |
599 | bio_get(op->cache_bio); | |
600 | bch_insert_data_loop(cl); | |
601 | } | |
602 | ||
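The kernel-doc above notes that a fragmented write produces multiple keys. A user-space sketch of that outcome, where grant() is a hypothetical stand-in for bch_alloc_sectors() handing back fewer sectors than requested:

```c
#include <stdio.h>

struct frag_key { unsigned long long inode, offset; unsigned size; };

/* stand-in for bch_alloc_sectors() granting at most 16 sectors per call */
static unsigned grant(unsigned want)
{
	return want > 16 ? 16 : want;
}

int main(void)
{
	struct frag_key keys[8];
	unsigned long long sector = 1000;	/* start of the logical write */
	unsigned remaining = 40, nkeys = 0;

	while (remaining) {
		unsigned got = grant(remaining);

		/* one key per fragment; the offset points at the fragment's end */
		keys[nkeys++] = (struct frag_key){ .inode = 1,
						   .offset = sector + got,
						   .size = got };
		sector += got;
		remaining -= got;
	}

	for (unsigned i = 0; i < nkeys; i++)
		printf("key %u: inode %llu end %llu size %u\n", i,
		       keys[i].inode, keys[i].offset, keys[i].size);
	return 0;
}
```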
603 | void bch_btree_insert_async(struct closure *cl) | |
604 | { | |
605 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
606 | struct search *s = container_of(op, struct search, op); | |
607 | ||
608 | if (bch_btree_insert(op, op->c)) { | |
609 | s->error = -ENOMEM; | |
610 | op->insert_data_done = true; | |
611 | } | |
612 | ||
613 | if (op->insert_data_done) { | |
614 | bch_keylist_free(&op->keys); | |
615 | closure_return(cl); | |
616 | } else | |
617 | continue_at(cl, bch_insert_data_loop, bcache_wq); | |
618 | } | |
619 | ||
620 | /* Common code for the make_request functions */ | |
621 | ||
622 | static void request_endio(struct bio *bio, int error) | |
623 | { | |
624 | struct closure *cl = bio->bi_private; | |
625 | ||
626 | if (error) { | |
627 | struct search *s = container_of(cl, struct search, cl); | |
628 | s->error = error; | |
629 | /* Only cache read errors are recoverable */ | |
630 | s->recoverable = false; | |
631 | } | |
632 | ||
633 | bio_put(bio); | |
634 | closure_put(cl); | |
635 | } | |
636 | ||
637 | void bch_cache_read_endio(struct bio *bio, int error) | |
638 | { | |
639 | struct bbio *b = container_of(bio, struct bbio, bio); | |
640 | struct closure *cl = bio->bi_private; | |
641 | struct search *s = container_of(cl, struct search, cl); | |
642 | ||
643 | /* | |
644 | * If the bucket was reused while our bio was in flight, we might have | |
645 | * read the wrong data. Set s->error but not error, so the failure isn't | |
646 | * counted against the cache device; we'll still reread the data | |
647 | * from the backing device. | |
648 | */ | |
649 | ||
650 | if (error) | |
651 | s->error = error; | |
652 | else if (ptr_stale(s->op.c, &b->key, 0)) { | |
653 | atomic_long_inc(&s->op.c->cache_read_races); | |
654 | s->error = -EINTR; | |
655 | } | |
656 | ||
657 | bch_bbio_endio(s->op.c, bio, error, "reading from cache"); | |
658 | } | |
659 | ||
660 | static void bio_complete(struct search *s) | |
661 | { | |
662 | if (s->orig_bio) { | |
663 | int cpu, rw = bio_data_dir(s->orig_bio); | |
664 | unsigned long duration = jiffies - s->start_time; | |
665 | ||
666 | cpu = part_stat_lock(); | |
667 | part_round_stats(cpu, &s->d->disk->part0); | |
668 | part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration); | |
669 | part_stat_unlock(); | |
670 | ||
671 | trace_bcache_request_end(s, s->orig_bio); | |
672 | bio_endio(s->orig_bio, s->error); | |
673 | s->orig_bio = NULL; | |
674 | } | |
675 | } | |
676 | ||
677 | static void do_bio_hook(struct search *s) | |
678 | { | |
679 | struct bio *bio = &s->bio.bio; | |
680 | memcpy(bio, s->orig_bio, sizeof(struct bio)); | |
681 | ||
682 | bio->bi_end_io = request_endio; | |
683 | bio->bi_private = &s->cl; | |
684 | atomic_set(&bio->bi_cnt, 3); | |
685 | } | |
686 | ||
687 | static void search_free(struct closure *cl) | |
688 | { | |
689 | struct search *s = container_of(cl, struct search, cl); | |
690 | bio_complete(s); | |
691 | ||
692 | if (s->op.cache_bio) | |
693 | bio_put(s->op.cache_bio); | |
694 | ||
695 | if (s->unaligned_bvec) | |
696 | mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec); | |
697 | ||
698 | closure_debug_destroy(cl); | |
699 | mempool_free(s, s->d->c->search); | |
700 | } | |
701 | ||
702 | static struct search *search_alloc(struct bio *bio, struct bcache_device *d) | |
703 | { | |
704 | struct bio_vec *bv; | |
705 | struct search *s = mempool_alloc(d->c->search, GFP_NOIO); | |
706 | memset(s, 0, offsetof(struct search, op.keys)); | |
707 | ||
708 | __closure_init(&s->cl, NULL); | |
709 | ||
710 | s->op.inode = d->id; | |
711 | s->op.c = d->c; | |
712 | s->d = d; | |
713 | s->op.lock = -1; | |
714 | s->task = current; | |
715 | s->orig_bio = bio; | |
716 | s->write = (bio->bi_rw & REQ_WRITE) != 0; | |
717 | s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0; | |
718 | s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0; | |
719 | s->recoverable = 1; | |
720 | s->start_time = jiffies; | |
721 | do_bio_hook(s); | |
722 | ||
723 | if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) { | |
724 | bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO); | |
725 | memcpy(bv, bio_iovec(bio), | |
726 | sizeof(struct bio_vec) * bio_segments(bio)); | |
727 | ||
728 | s->bio.bio.bi_io_vec = bv; | |
729 | s->unaligned_bvec = 1; | |
730 | } | |
731 | ||
732 | return s; | |
733 | } | |
734 | ||
735 | static void btree_read_async(struct closure *cl) | |
736 | { | |
737 | struct btree_op *op = container_of(cl, struct btree_op, cl); | |
738 | ||
739 | int ret = btree_root(search_recurse, op->c, op); | |
740 | ||
741 | if (ret == -EAGAIN) | |
742 | continue_at(cl, btree_read_async, bcache_wq); | |
743 | ||
744 | closure_return(cl); | |
745 | } | |
746 | ||
747 | /* Cached devices */ | |
748 | ||
749 | static void cached_dev_bio_complete(struct closure *cl) | |
750 | { | |
751 | struct search *s = container_of(cl, struct search, cl); | |
752 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); | |
753 | ||
754 | search_free(cl); | |
755 | cached_dev_put(dc); | |
756 | } | |
757 | ||
758 | /* Process reads */ | |
759 | ||
760 | static void cached_dev_read_complete(struct closure *cl) | |
761 | { | |
762 | struct search *s = container_of(cl, struct search, cl); | |
763 | ||
764 | if (s->op.insert_collision) | |
765 | bch_mark_cache_miss_collision(s); | |
766 | ||
767 | if (s->op.cache_bio) { | |
768 | int i; | |
769 | struct bio_vec *bv; | |
770 | ||
771 | __bio_for_each_segment(bv, s->op.cache_bio, i, 0) | |
772 | __free_page(bv->bv_page); | |
773 | } | |
774 | ||
775 | cached_dev_bio_complete(cl); | |
776 | } | |
777 | ||
778 | static void request_read_error(struct closure *cl) | |
779 | { | |
780 | struct search *s = container_of(cl, struct search, cl); | |
781 | struct bio_vec *bv; | |
782 | int i; | |
783 | ||
784 | if (s->recoverable) { | |
c37511b8 KO |
785 | /* Retry from the backing device: */ |
786 | trace_bcache_read_retry(s->orig_bio); | |
cafe5635 KO |
787 | |
788 | s->error = 0; | |
789 | bv = s->bio.bio.bi_io_vec; | |
790 | do_bio_hook(s); | |
791 | s->bio.bio.bi_io_vec = bv; | |
792 | ||
793 | if (!s->unaligned_bvec) | |
794 | bio_for_each_segment(bv, s->orig_bio, i) | |
795 | bv->bv_offset = 0, bv->bv_len = PAGE_SIZE; | |
796 | else | |
797 | memcpy(s->bio.bio.bi_io_vec, | |
798 | bio_iovec(s->orig_bio), | |
799 | sizeof(struct bio_vec) * | |
800 | bio_segments(s->orig_bio)); | |
801 | ||
802 | /* XXX: invalidate cache */ | |
803 | ||
cafe5635 KO |
804 | closure_bio_submit(&s->bio.bio, &s->cl, s->d); |
805 | } | |
806 | ||
807 | continue_at(cl, cached_dev_read_complete, NULL); | |
808 | } | |
809 | ||
810 | static void request_read_done(struct closure *cl) | |
811 | { | |
812 | struct search *s = container_of(cl, struct search, cl); | |
813 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); | |
814 | ||
815 | /* | |
816 | * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio now | |
817 | * contains data ready to be inserted into the cache. | |
818 | * | |
819 | * First, we copy the data we just read from cache_bio's bounce buffers | |
820 | * to the buffers the original bio pointed to: | |
821 | */ | |
822 | ||
823 | if (s->op.cache_bio) { | |
824 | struct bio_vec *src, *dst; | |
825 | unsigned src_offset, dst_offset, bytes; | |
826 | void *dst_ptr; | |
827 | ||
828 | bio_reset(s->op.cache_bio); | |
829 | s->op.cache_bio->bi_sector = s->cache_miss->bi_sector; | |
830 | s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev; | |
831 | s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; | |
169ef1cf | 832 | bch_bio_map(s->op.cache_bio, NULL); |
cafe5635 KO |
833 | |
834 | src = bio_iovec(s->op.cache_bio); | |
835 | dst = bio_iovec(s->cache_miss); | |
836 | src_offset = src->bv_offset; | |
837 | dst_offset = dst->bv_offset; | |
838 | dst_ptr = kmap(dst->bv_page); | |
839 | ||
840 | while (1) { | |
841 | if (dst_offset == dst->bv_offset + dst->bv_len) { | |
842 | kunmap(dst->bv_page); | |
843 | dst++; | |
844 | if (dst == bio_iovec_idx(s->cache_miss, | |
845 | s->cache_miss->bi_vcnt)) | |
846 | break; | |
847 | ||
848 | dst_offset = dst->bv_offset; | |
849 | dst_ptr = kmap(dst->bv_page); | |
850 | } | |
851 | ||
852 | if (src_offset == src->bv_offset + src->bv_len) { | |
853 | src++; | |
854 | if (src == bio_iovec_idx(s->op.cache_bio, | |
855 | s->op.cache_bio->bi_vcnt)) | |
856 | BUG(); | |
857 | ||
858 | src_offset = src->bv_offset; | |
859 | } | |
860 | ||
861 | bytes = min(dst->bv_offset + dst->bv_len - dst_offset, | |
862 | src->bv_offset + src->bv_len - src_offset); | |
863 | ||
864 | memcpy(dst_ptr + dst_offset, | |
865 | page_address(src->bv_page) + src_offset, | |
866 | bytes); | |
867 | ||
868 | src_offset += bytes; | |
869 | dst_offset += bytes; | |
870 | } | |
871 | ||
872 | bio_put(s->cache_miss); | |
873 | s->cache_miss = NULL; | |
874 | } | |
875 | ||
876 | if (verify(dc, &s->bio.bio) && s->recoverable) | |
877 | bch_data_verify(s); | |
878 | ||
879 | bio_complete(s); | |
880 | ||
881 | if (s->op.cache_bio && | |
882 | !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) { | |
883 | s->op.type = BTREE_REPLACE; | |
884 | closure_call(&s->op.cl, bch_insert_data, NULL, cl); | |
885 | } | |
886 | ||
887 | continue_at(cl, cached_dev_read_complete, NULL); | |
888 | } | |
889 | ||
890 | static void request_read_done_bh(struct closure *cl) | |
891 | { | |
892 | struct search *s = container_of(cl, struct search, cl); | |
893 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); | |
894 | ||
895 | bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip); | |
c37511b8 | 896 | trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip); |
cafe5635 KO |
897 | |
898 | if (s->error) | |
899 | continue_at_nobarrier(cl, request_read_error, bcache_wq); | |
900 | else if (s->op.cache_bio || verify(dc, &s->bio.bio)) | |
901 | continue_at_nobarrier(cl, request_read_done, bcache_wq); | |
902 | else | |
903 | continue_at_nobarrier(cl, cached_dev_read_complete, NULL); | |
904 | } | |
905 | ||
906 | static int cached_dev_cache_miss(struct btree *b, struct search *s, | |
907 | struct bio *bio, unsigned sectors) | |
908 | { | |
909 | int ret = 0; | |
910 | unsigned reada; | |
911 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); | |
912 | struct bio *miss; | |
913 | ||
914 | miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); | |
915 | if (!miss) | |
916 | return -EAGAIN; | |
917 | ||
918 | if (miss == bio) | |
919 | s->op.lookup_done = true; | |
920 | ||
921 | miss->bi_end_io = request_endio; | |
922 | miss->bi_private = &s->cl; | |
923 | ||
924 | if (s->cache_miss || s->op.skip) | |
925 | goto out_submit; | |
926 | ||
927 | if (miss != bio || | |
928 | (bio->bi_rw & REQ_RAHEAD) || | |
929 | (bio->bi_rw & REQ_META) || | |
930 | s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA) | |
931 | reada = 0; | |
932 | else { | |
933 | reada = min(dc->readahead >> 9, | |
934 | sectors - bio_sectors(miss)); | |
935 | ||
936 | if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev)) | |
937 | reada = bdev_sectors(miss->bi_bdev) - bio_end(miss); | |
938 | } | |
939 | ||
940 | s->cache_bio_sectors = bio_sectors(miss) + reada; | |
941 | s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT, | |
942 | DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS), | |
943 | dc->disk.bio_split); | |
944 | ||
945 | if (!s->op.cache_bio) | |
946 | goto out_submit; | |
947 | ||
948 | s->op.cache_bio->bi_sector = miss->bi_sector; | |
949 | s->op.cache_bio->bi_bdev = miss->bi_bdev; | |
950 | s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; | |
951 | ||
952 | s->op.cache_bio->bi_end_io = request_endio; | |
953 | s->op.cache_bio->bi_private = &s->cl; | |
954 | ||
955 | /* btree_search_recurse()'s btree iterator is no good anymore */ | |
956 | ret = -EINTR; | |
957 | if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio)) | |
958 | goto out_put; | |
959 | ||
169ef1cf KO |
960 | bch_bio_map(s->op.cache_bio, NULL); |
961 | if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO)) | |
cafe5635 KO |
962 | goto out_put; |
963 | ||
964 | s->cache_miss = miss; | |
965 | bio_get(s->op.cache_bio); | |
966 | ||
cafe5635 KO |
967 | closure_bio_submit(s->op.cache_bio, &s->cl, s->d); |
968 | ||
969 | return ret; | |
970 | out_put: | |
971 | bio_put(s->op.cache_bio); | |
972 | s->op.cache_bio = NULL; | |
973 | out_submit: | |
974 | closure_bio_submit(miss, &s->cl, s->d); | |
975 | return ret; | |
976 | } | |
977 | ||
978 | static void request_read(struct cached_dev *dc, struct search *s) | |
979 | { | |
980 | struct closure *cl = &s->cl; | |
981 | ||
982 | check_should_skip(dc, s); | |
983 | closure_call(&s->op.cl, btree_read_async, NULL, cl); | |
984 | ||
985 | continue_at(cl, request_read_done_bh, NULL); | |
986 | } | |
987 | ||
988 | /* Process writes */ | |
989 | ||
990 | static void cached_dev_write_complete(struct closure *cl) | |
991 | { | |
992 | struct search *s = container_of(cl, struct search, cl); | |
993 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); | |
994 | ||
995 | up_read_non_owner(&dc->writeback_lock); | |
996 | cached_dev_bio_complete(cl); | |
997 | } | |
998 | ||
cafe5635 KO |
999 | static void request_write(struct cached_dev *dc, struct search *s) |
1000 | { | |
1001 | struct closure *cl = &s->cl; | |
1002 | struct bio *bio = &s->bio.bio; | |
1003 | struct bkey start, end; | |
1004 | start = KEY(dc->disk.id, bio->bi_sector, 0); | |
1005 | end = KEY(dc->disk.id, bio_end(bio), 0); | |
1006 | ||
1007 | bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end); | |
1008 | ||
1009 | check_should_skip(dc, s); | |
1010 | down_read_non_owner(&dc->writeback_lock); | |
1011 | ||
1012 | if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) { | |
1013 | s->op.skip = false; | |
1014 | s->writeback = true; | |
1015 | } | |
1016 | ||
1017 | if (bio->bi_rw & REQ_DISCARD) | |
1018 | goto skip; | |
1019 | ||
72c27061 KO |
1020 | if (should_writeback(dc, s->orig_bio, |
1021 | cache_mode(dc, bio), | |
1022 | s->op.skip)) { | |
1023 | s->op.skip = false; | |
1024 | s->writeback = true; | |
1025 | } | |
1026 | ||
cafe5635 KO |
1027 | if (s->op.skip) |
1028 | goto skip; | |
1029 | ||
c37511b8 KO |
1030 | trace_bcache_write(s->orig_bio, s->writeback, s->op.skip); |
1031 | ||
cafe5635 KO |
1032 | if (!s->writeback) { |
1033 | s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, | |
1034 | dc->disk.bio_split); | |
1035 | ||
cafe5635 KO |
1036 | closure_bio_submit(bio, cl, s->d); |
1037 | } else { | |
1038 | s->op.cache_bio = bio; | |
279afbad | 1039 | bch_writeback_add(dc); |
cafe5635 KO |
1040 | } |
1041 | out: | |
1042 | closure_call(&s->op.cl, bch_insert_data, NULL, cl); | |
1043 | continue_at(cl, cached_dev_write_complete, NULL); | |
1044 | skip: | |
1045 | s->op.skip = true; | |
1046 | s->op.cache_bio = s->orig_bio; | |
1047 | bio_get(s->op.cache_bio); | |
cafe5635 KO |
1048 | |
1049 | if ((bio->bi_rw & REQ_DISCARD) && | |
1050 | !blk_queue_discard(bdev_get_queue(dc->bdev))) | |
1051 | goto out; | |
1052 | ||
1053 | closure_bio_submit(bio, cl, s->d); | |
1054 | goto out; | |
1055 | } | |
1056 | ||
1057 | static void request_nodata(struct cached_dev *dc, struct search *s) | |
1058 | { | |
1059 | struct closure *cl = &s->cl; | |
1060 | struct bio *bio = &s->bio.bio; | |
1061 | ||
1062 | if (bio->bi_rw & REQ_DISCARD) { | |
1063 | request_write(dc, s); | |
1064 | return; | |
1065 | } | |
1066 | ||
1067 | if (s->op.flush_journal) | |
1068 | bch_journal_meta(s->op.c, cl); | |
1069 | ||
1070 | closure_bio_submit(bio, cl, s->d); | |
1071 | ||
1072 | continue_at(cl, cached_dev_bio_complete, NULL); | |
1073 | } | |
1074 | ||
1075 | /* Cached devices - read & write stuff */ | |
1076 | ||
c37511b8 | 1077 | unsigned bch_get_congested(struct cache_set *c) |
cafe5635 KO |
1078 | { |
1079 | int i; | |
c37511b8 | 1080 | long rand; |
cafe5635 KO |
1081 | |
1082 | if (!c->congested_read_threshold_us && | |
1083 | !c->congested_write_threshold_us) | |
1084 | return 0; | |
1085 | ||
1086 | i = (local_clock_us() - c->congested_last_us) / 1024; | |
1087 | if (i < 0) | |
1088 | return 0; | |
1089 | ||
1090 | i += atomic_read(&c->congested); | |
1091 | if (i >= 0) | |
1092 | return 0; | |
1093 | ||
1094 | i += CONGESTED_MAX; | |
1095 | ||
c37511b8 KO |
1096 | if (i > 0) |
1097 | i = fract_exp_two(i, 6); | |
1098 | ||
1099 | rand = get_random_int(); | |
1100 | i -= bitmap_weight(&rand, BITS_PER_LONG); | |
1101 | ||
1102 | return i > 0 ? i : 1; | |
cafe5635 KO |
1103 | } |
1104 | ||
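fract_exp_two() lives in util.h and isn't shown in this file; assuming it approximates 2^(i / 2^6), the congestion threshold above grows roughly exponentially as the congested counter climbs toward CONGESTED_MAX, and the random popcount subtracted afterwards only dithers the result. An illustrative stand-in (not the kernel helper):

```c
#include <stdio.h>

/* stand-in for fract_exp_two(x, 6): piecewise-linear approximation of 2^(x/64) */
static unsigned approx_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ((1U << fract_bits) - 1);	/* fractional part */
	unsigned r = 1U << (x >> fract_bits);		/* 2^(integer part) */

	return r + ((r * fract) >> fract_bits);		/* interpolate between powers */
}

int main(void)
{
	for (unsigned i = 64; i <= 512; i += 64)
		printf("congested counter %3u -> threshold ~%u sectors\n",
		       i, approx_exp_two(i, 6));
	return 0;
}
```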
1105 | static void add_sequential(struct task_struct *t) | |
1106 | { | |
1107 | ewma_add(t->sequential_io_avg, | |
1108 | t->sequential_io, 8, 0); | |
1109 | ||
1110 | t->sequential_io = 0; | |
1111 | } | |
1112 | ||
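ewma_add() is also defined in util.h and not shown here; the usual shape of such an average, taken as an assumption, is avg = (avg * (weight - 1) + sample) / weight with weight = 8, which decays a task's old sequential-I/O history as new samples arrive:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long avg = 0;
	unsigned long long samples[] = { 1 << 20, 1 << 20, 0, 0 }; /* bytes of sequential I/O */

	for (unsigned i = 0; i < 4; i++) {
		avg = (avg * 7 + samples[i]) / 8;	/* assumed ewma_add(avg, sample, 8, 0) shape */
		printf("after sample %u: avg = %llu bytes\n", i, avg);
	}
	return 0;
}
```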
b1a67b0f | 1113 | static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) |
cafe5635 | 1114 | { |
b1a67b0f KO |
1115 | return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; |
1116 | } | |
cafe5635 | 1117 | |
b1a67b0f KO |
1118 | static void check_should_skip(struct cached_dev *dc, struct search *s) |
1119 | { | |
cafe5635 KO |
1120 | struct cache_set *c = s->op.c; |
1121 | struct bio *bio = &s->bio.bio; | |
cafe5635 | 1122 | unsigned mode = cache_mode(dc, bio); |
c37511b8 | 1123 | unsigned sectors, congested = bch_get_congested(c); |
cafe5635 KO |
1124 | |
1125 | if (atomic_read(&dc->disk.detaching) || | |
1126 | c->gc_stats.in_use > CUTOFF_CACHE_ADD || | |
1127 | (bio->bi_rw & REQ_DISCARD)) | |
1128 | goto skip; | |
1129 | ||
1130 | if (mode == CACHE_MODE_NONE || | |
1131 | (mode == CACHE_MODE_WRITEAROUND && | |
1132 | (bio->bi_rw & REQ_WRITE))) | |
1133 | goto skip; | |
1134 | ||
1135 | if (bio->bi_sector & (c->sb.block_size - 1) || | |
1136 | bio_sectors(bio) & (c->sb.block_size - 1)) { | |
1137 | pr_debug("skipping unaligned io"); | |
1138 | goto skip; | |
1139 | } | |
1140 | ||
c37511b8 KO |
1141 | if (!congested && !dc->sequential_cutoff) |
1142 | goto rescale; | |
cafe5635 | 1143 | |
c37511b8 KO |
1144 | if (!congested && |
1145 | mode == CACHE_MODE_WRITEBACK && | |
1146 | (bio->bi_rw & REQ_WRITE) && | |
1147 | (bio->bi_rw & REQ_SYNC)) | |
1148 | goto rescale; | |
cafe5635 KO |
1149 | |
1150 | if (dc->sequential_merge) { | |
1151 | struct io *i; | |
1152 | ||
1153 | spin_lock(&dc->io_lock); | |
1154 | ||
b1a67b0f | 1155 | hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) |
cafe5635 KO |
1156 | if (i->last == bio->bi_sector && |
1157 | time_before(jiffies, i->jiffies)) | |
1158 | goto found; | |
1159 | ||
1160 | i = list_first_entry(&dc->io_lru, struct io, lru); | |
1161 | ||
1162 | add_sequential(s->task); | |
1163 | i->sequential = 0; | |
1164 | found: | |
1165 | if (i->sequential + bio->bi_size > i->sequential) | |
1166 | i->sequential += bio->bi_size; | |
1167 | ||
1168 | i->last = bio_end(bio); | |
1169 | i->jiffies = jiffies + msecs_to_jiffies(5000); | |
1170 | s->task->sequential_io = i->sequential; | |
1171 | ||
1172 | hlist_del(&i->hash); | |
b1a67b0f | 1173 | hlist_add_head(&i->hash, iohash(dc, i->last)); |
cafe5635 KO |
1174 | list_move_tail(&i->lru, &dc->io_lru); |
1175 | ||
1176 | spin_unlock(&dc->io_lock); | |
1177 | } else { | |
1178 | s->task->sequential_io = bio->bi_size; | |
1179 | ||
1180 | add_sequential(s->task); | |
1181 | } | |
1182 | ||
c37511b8 KO |
1183 | sectors = max(s->task->sequential_io, |
1184 | s->task->sequential_io_avg) >> 9; | |
cafe5635 | 1185 | |
c37511b8 KO |
1186 | if (dc->sequential_cutoff && |
1187 | sectors >= dc->sequential_cutoff >> 9) { | |
1188 | trace_bcache_bypass_sequential(s->orig_bio); | |
cafe5635 | 1189 | goto skip; |
c37511b8 KO |
1190 | } |
1191 | ||
1192 | if (congested && sectors >= congested) { | |
1193 | trace_bcache_bypass_congested(s->orig_bio); | |
1194 | goto skip; | |
1195 | } | |
cafe5635 KO |
1196 | |
1197 | rescale: | |
1198 | bch_rescale_priorities(c, bio_sectors(bio)); | |
1199 | return; | |
1200 | skip: | |
1201 | bch_mark_sectors_bypassed(s, bio_sectors(bio)); | |
1202 | s->op.skip = true; | |
1203 | } | |
1204 | ||
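check_should_skip() above tracks recent I/O end sectors to detect sequential streams and bypasses the cache once a stream exceeds dc->sequential_cutoff. A user-space sketch of that core idea (hash table, EWMA and per-task tracking omitted; the cutoff and sizes are made-up values):

```c
#include <stdbool.h>
#include <stdio.h>

struct stream {
	unsigned long long next_sector;	/* where the previous I/O ended */
	unsigned long long run_bytes;	/* size of the current sequential run */
};

static bool should_bypass(struct stream *s, unsigned long long sector,
			  unsigned sectors, unsigned long long cutoff_bytes)
{
	if (sector != s->next_sector)
		s->run_bytes = 0;	/* not contiguous: start a new run */

	s->run_bytes += (unsigned long long)sectors << 9;
	s->next_sector = sector + sectors;

	return s->run_bytes >= cutoff_bytes;
}

int main(void)
{
	struct stream s = { 0 };
	unsigned long long sector = 0;

	/* eight back-to-back 1 MiB writes against a 4 MiB cutoff */
	for (int i = 0; i < 8; i++) {
		bool bypass = should_bypass(&s, sector, 2048, 4ULL << 20);

		printf("io %d: %s\n", i, bypass ? "bypass" : "cache");
		sector += 2048;
	}
	return 0;
}
```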
1205 | static void cached_dev_make_request(struct request_queue *q, struct bio *bio) | |
1206 | { | |
1207 | struct search *s; | |
1208 | struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; | |
1209 | struct cached_dev *dc = container_of(d, struct cached_dev, disk); | |
1210 | int cpu, rw = bio_data_dir(bio); | |
1211 | ||
1212 | cpu = part_stat_lock(); | |
1213 | part_stat_inc(cpu, &d->disk->part0, ios[rw]); | |
1214 | part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio)); | |
1215 | part_stat_unlock(); | |
1216 | ||
1217 | bio->bi_bdev = dc->bdev; | |
2903381f | 1218 | bio->bi_sector += dc->sb.data_offset; |
cafe5635 KO |
1219 | |
1220 | if (cached_dev_get(dc)) { | |
1221 | s = search_alloc(bio, d); | |
1222 | trace_bcache_request_start(s, bio); | |
1223 | ||
1224 | if (!bio_has_data(bio)) | |
1225 | request_nodata(dc, s); | |
1226 | else if (rw) | |
1227 | request_write(dc, s); | |
1228 | else | |
1229 | request_read(dc, s); | |
1230 | } else { | |
1231 | if ((bio->bi_rw & REQ_DISCARD) && | |
1232 | !blk_queue_discard(bdev_get_queue(dc->bdev))) | |
1233 | bio_endio(bio, 0); | |
1234 | else | |
1235 | bch_generic_make_request(bio, &d->bio_split_hook); | |
1236 | } | |
1237 | } | |
1238 | ||
1239 | static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, | |
1240 | unsigned int cmd, unsigned long arg) | |
1241 | { | |
1242 | struct cached_dev *dc = container_of(d, struct cached_dev, disk); | |
1243 | return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); | |
1244 | } | |
1245 | ||
1246 | static int cached_dev_congested(void *data, int bits) | |
1247 | { | |
1248 | struct bcache_device *d = data; | |
1249 | struct cached_dev *dc = container_of(d, struct cached_dev, disk); | |
1250 | struct request_queue *q = bdev_get_queue(dc->bdev); | |
1251 | int ret = 0; | |
1252 | ||
1253 | if (bdi_congested(&q->backing_dev_info, bits)) | |
1254 | return 1; | |
1255 | ||
1256 | if (cached_dev_get(dc)) { | |
1257 | unsigned i; | |
1258 | struct cache *ca; | |
1259 | ||
1260 | for_each_cache(ca, d->c, i) { | |
1261 | q = bdev_get_queue(ca->bdev); | |
1262 | ret |= bdi_congested(&q->backing_dev_info, bits); | |
1263 | } | |
1264 | ||
1265 | cached_dev_put(dc); | |
1266 | } | |
1267 | ||
1268 | return ret; | |
1269 | } | |
1270 | ||
1271 | void bch_cached_dev_request_init(struct cached_dev *dc) | |
1272 | { | |
1273 | struct gendisk *g = dc->disk.disk; | |
1274 | ||
1275 | g->queue->make_request_fn = cached_dev_make_request; | |
1276 | g->queue->backing_dev_info.congested_fn = cached_dev_congested; | |
1277 | dc->disk.cache_miss = cached_dev_cache_miss; | |
1278 | dc->disk.ioctl = cached_dev_ioctl; | |
1279 | } | |
1280 | ||
1281 | /* Flash backed devices */ | |
1282 | ||
1283 | static int flash_dev_cache_miss(struct btree *b, struct search *s, | |
1284 | struct bio *bio, unsigned sectors) | |
1285 | { | |
1286 | /* Zero fill bio */ | |
1287 | ||
1288 | while (bio->bi_idx != bio->bi_vcnt) { | |
1289 | struct bio_vec *bv = bio_iovec(bio); | |
1290 | unsigned j = min(bv->bv_len >> 9, sectors); | |
1291 | ||
1292 | void *p = kmap(bv->bv_page); | |
1293 | memset(p + bv->bv_offset, 0, j << 9); | |
1294 | kunmap(bv->bv_page); | |
1295 | ||
1296 | bv->bv_len -= j << 9; | |
1297 | bv->bv_offset += j << 9; | |
1298 | ||
1299 | if (bv->bv_len) | |
1300 | return 0; | |
1301 | ||
1302 | bio->bi_sector += j; | |
1303 | bio->bi_size -= j << 9; | |
1304 | ||
1305 | bio->bi_idx++; | |
1306 | sectors -= j; | |
1307 | } | |
1308 | ||
1309 | s->op.lookup_done = true; | |
1310 | ||
1311 | return 0; | |
1312 | } | |
1313 | ||
1314 | static void flash_dev_make_request(struct request_queue *q, struct bio *bio) | |
1315 | { | |
1316 | struct search *s; | |
1317 | struct closure *cl; | |
1318 | struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; | |
1319 | int cpu, rw = bio_data_dir(bio); | |
1320 | ||
1321 | cpu = part_stat_lock(); | |
1322 | part_stat_inc(cpu, &d->disk->part0, ios[rw]); | |
1323 | part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio)); | |
1324 | part_stat_unlock(); | |
1325 | ||
1326 | s = search_alloc(bio, d); | |
1327 | cl = &s->cl; | |
1328 | bio = &s->bio.bio; | |
1329 | ||
1330 | trace_bcache_request_start(s, bio); | |
1331 | ||
1332 | if (bio_has_data(bio) && !rw) { | |
1333 | closure_call(&s->op.cl, btree_read_async, NULL, cl); | |
1334 | } else if (bio_has_data(bio) || s->op.skip) { | |
1335 | bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, | |
1336 | &KEY(d->id, bio->bi_sector, 0), | |
1337 | &KEY(d->id, bio_end(bio), 0)); | |
1338 | ||
1339 | s->writeback = true; | |
1340 | s->op.cache_bio = bio; | |
1341 | ||
1342 | closure_call(&s->op.cl, bch_insert_data, NULL, cl); | |
1343 | } else { | |
1344 | /* No data - probably a cache flush */ | |
1345 | if (s->op.flush_journal) | |
1346 | bch_journal_meta(s->op.c, cl); | |
1347 | } | |
1348 | ||
1349 | continue_at(cl, search_free, NULL); | |
1350 | } | |
1351 | ||
1352 | static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, | |
1353 | unsigned int cmd, unsigned long arg) | |
1354 | { | |
1355 | return -ENOTTY; | |
1356 | } | |
1357 | ||
1358 | static int flash_dev_congested(void *data, int bits) | |
1359 | { | |
1360 | struct bcache_device *d = data; | |
1361 | struct request_queue *q; | |
1362 | struct cache *ca; | |
1363 | unsigned i; | |
1364 | int ret = 0; | |
1365 | ||
1366 | for_each_cache(ca, d->c, i) { | |
1367 | q = bdev_get_queue(ca->bdev); | |
1368 | ret |= bdi_congested(&q->backing_dev_info, bits); | |
1369 | } | |
1370 | ||
1371 | return ret; | |
1372 | } | |
1373 | ||
1374 | void bch_flash_dev_request_init(struct bcache_device *d) | |
1375 | { | |
1376 | struct gendisk *g = d->disk; | |
1377 | ||
1378 | g->queue->make_request_fn = flash_dev_make_request; | |
1379 | g->queue->backing_dev_info.congested_fn = flash_dev_congested; | |
1380 | d->cache_miss = flash_dev_cache_miss; | |
1381 | d->ioctl = flash_dev_ioctl; | |
1382 | } | |
1383 | ||
1384 | void bch_request_exit(void) | |
1385 | { | |
1386 | #ifdef CONFIG_CGROUP_BCACHE | |
1387 | cgroup_unload_subsys(&bcache_subsys); | |
1388 | #endif | |
1389 | if (bch_search_cache) | |
1390 | kmem_cache_destroy(bch_search_cache); | |
1391 | } | |
1392 | ||
1393 | int __init bch_request_init(void) | |
1394 | { | |
1395 | bch_search_cache = KMEM_CACHE(search, 0); | |
1396 | if (!bch_search_cache) | |
1397 | return -ENOMEM; | |
1398 | ||
1399 | #ifdef CONFIG_CGROUP_BCACHE | |
1400 | cgroup_load_subsys(&bcache_subsys); | |
1401 | init_bch_cgroup(&bcache_default_cgroup); | |
1402 | ||
1403 | cgroup_add_cftypes(&bcache_subsys, bch_files); | |
1404 | #endif | |
1405 | return 0; | |
1406 | } |