bcache: Convert bucket_wait to wait_queue_head_t
drivers/md/bcache/request.c
1 /*
2 * Main bcache entry point - handle a read or a write request and decide what to
3 * do with it; the make_request functions are called by the block layer.
4 *
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
7 */
8
9 #include "bcache.h"
10 #include "btree.h"
11 #include "debug.h"
12 #include "request.h"
13 #include "writeback.h"
14
15 #include <linux/cgroup.h>
16 #include <linux/module.h>
17 #include <linux/hash.h>
18 #include <linux/random.h>
19 #include "blk-cgroup.h"
20
21 #include <trace/events/bcache.h>
22
23 #define CUTOFF_CACHE_ADD 95
24 #define CUTOFF_CACHE_READA 90
25
26 struct kmem_cache *bch_search_cache;
27
28 static void bch_data_insert_start(struct closure *);
29
30 /* Cgroup interface */
31
32 #ifdef CONFIG_CGROUP_BCACHE
33 static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
34
35 static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
36 {
37 struct cgroup_subsys_state *css;
38 return cgroup &&
39 (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
40 ? container_of(css, struct bch_cgroup, css)
41 : &bcache_default_cgroup;
42 }
43
44 struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
45 {
46 struct cgroup_subsys_state *css = bio->bi_css
47 ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
48 : task_subsys_state(current, bcache_subsys_id);
49
50 return css
51 ? container_of(css, struct bch_cgroup, css)
52 : &bcache_default_cgroup;
53 }
54
55 static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
56 struct file *file,
57 char __user *buf, size_t nbytes, loff_t *ppos)
58 {
59 char tmp[1024];
60 int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
61 cgroup_to_bcache(cgrp)->cache_mode + 1);
62
63 if (len < 0)
64 return len;
65
66 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
67 }
68
69 static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
70 const char *buf)
71 {
72 int v = bch_read_string_list(buf, bch_cache_modes);
73 if (v < 0)
74 return v;
75
76 cgroup_to_bcache(cgrp)->cache_mode = v - 1;
77 return 0;
78 }
79
80 static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
81 {
82 return cgroup_to_bcache(cgrp)->verify;
83 }
84
85 static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
86 {
87 cgroup_to_bcache(cgrp)->verify = val;
88 return 0;
89 }
90
91 static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
92 {
93 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
94 return atomic_read(&bcachecg->stats.cache_hits);
95 }
96
97 static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
98 {
99 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
100 return atomic_read(&bcachecg->stats.cache_misses);
101 }
102
103 static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
104 struct cftype *cft)
105 {
106 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
107 return atomic_read(&bcachecg->stats.cache_bypass_hits);
108 }
109
110 static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
111 struct cftype *cft)
112 {
113 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
114 return atomic_read(&bcachecg->stats.cache_bypass_misses);
115 }
116
117 static struct cftype bch_files[] = {
118 {
119 .name = "cache_mode",
120 .read = cache_mode_read,
121 .write_string = cache_mode_write,
122 },
123 {
124 .name = "verify",
125 .read_u64 = bch_verify_read,
126 .write_u64 = bch_verify_write,
127 },
128 {
129 .name = "cache_hits",
130 .read_u64 = bch_cache_hits_read,
131 },
132 {
133 .name = "cache_misses",
134 .read_u64 = bch_cache_misses_read,
135 },
136 {
137 .name = "cache_bypass_hits",
138 .read_u64 = bch_cache_bypass_hits_read,
139 },
140 {
141 .name = "cache_bypass_misses",
142 .read_u64 = bch_cache_bypass_misses_read,
143 },
144 { } /* terminate */
145 };
146
147 static void init_bch_cgroup(struct bch_cgroup *cg)
148 {
149 cg->cache_mode = -1;
150 }
151
152 static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
153 {
154 struct bch_cgroup *cg;
155
156 cg = kzalloc(sizeof(*cg), GFP_KERNEL);
157 if (!cg)
158 return ERR_PTR(-ENOMEM);
159 init_bch_cgroup(cg);
160 return &cg->css;
161 }
162
163 static void bcachecg_destroy(struct cgroup *cgroup)
164 {
165 struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
166 free_css_id(&bcache_subsys, &cg->css);
167 kfree(cg);
168 }
169
170 struct cgroup_subsys bcache_subsys = {
171 .create = bcachecg_create,
172 .destroy = bcachecg_destroy,
173 .subsys_id = bcache_subsys_id,
174 .name = "bcache",
175 .module = THIS_MODULE,
176 };
177 EXPORT_SYMBOL_GPL(bcache_subsys);
178 #endif
179
180 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
181 {
182 #ifdef CONFIG_CGROUP_BCACHE
183 int r = bch_bio_to_cgroup(bio)->cache_mode;
184 if (r >= 0)
185 return r;
186 #endif
187 return BDEV_CACHE_MODE(&dc->sb);
188 }
189
190 static bool verify(struct cached_dev *dc, struct bio *bio)
191 {
192 #ifdef CONFIG_CGROUP_BCACHE
193 if (bch_bio_to_cgroup(bio)->verify)
194 return true;
195 #endif
196 return dc->verify;
197 }
198
199 static void bio_csum(struct bio *bio, struct bkey *k)
200 {
201 struct bio_vec *bv;
202 uint64_t csum = 0;
203 int i;
204
205 bio_for_each_segment(bv, bio, i) {
206 void *d = kmap(bv->bv_page) + bv->bv_offset;
207 csum = bch_crc64_update(csum, d, bv->bv_len);
208 kunmap(bv->bv_page);
209 }
210
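/*
 * The checksum is stored in the u64 slot immediately after the key's last
 * pointer; the top bit is masked off, so only 63 bits of the crc64 are kept.
 */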
211 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
212 }
213
214 /* Insert data into cache */
215
216 static void bch_data_insert_keys(struct closure *cl)
217 {
218 struct btree_op *op = container_of(cl, struct btree_op, cl);
219 struct search *s = container_of(op, struct search, op);
220
221 /*
222 * If we're looping, we might already be waiting on
223 * another journal write - we can't wait on more than one journal write
224 * at a time.
225 *
226 * XXX: this looks wrong
227 */
228 #if 0
229 while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
230 closure_sync(&s->cl);
231 #endif
232
233 if (s->write)
234 op->journal = bch_journal(op->c, &s->insert_keys,
235 op->flush_journal
236 ? &s->cl : NULL);
237
238 if (bch_btree_insert(op, op->c, &s->insert_keys)) {
239 s->error = -ENOMEM;
240 op->insert_data_done = true;
241 }
242
243 if (op->journal)
244 atomic_dec_bug(op->journal);
245 op->journal = NULL;
246
247 if (!op->insert_data_done)
248 continue_at(cl, bch_data_insert_start, bcache_wq);
249
250 bch_keylist_free(&s->insert_keys);
251 closure_return(cl);
252 }
253
254 struct open_bucket {
255 struct list_head list;
256 struct task_struct *last;
257 unsigned sectors_free;
258 BKEY_PADDED(key);
259 };
260
261 void bch_open_buckets_free(struct cache_set *c)
262 {
263 struct open_bucket *b;
264
265 while (!list_empty(&c->data_buckets)) {
266 b = list_first_entry(&c->data_buckets,
267 struct open_bucket, list);
268 list_del(&b->list);
269 kfree(b);
270 }
271 }
272
273 int bch_open_buckets_alloc(struct cache_set *c)
274 {
275 int i;
276
277 spin_lock_init(&c->data_bucket_lock);
278
279 for (i = 0; i < 6; i++) {
280 struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
281 if (!b)
282 return -ENOMEM;
283
284 list_add(&b->list, &c->data_buckets);
285 }
286
287 return 0;
288 }
289
290 /*
291 * We keep multiple buckets open for writes, and try to segregate different
292 * write streams for better cache utilization: first we look for a bucket where
293 * the last write to it was sequential with the current write, and failing that
294 * we look for a bucket that was last used by the same task.
295 *
296 * The idea is that if you've got multiple tasks pulling data into the cache at the
297 * same time, you'll get better cache utilization if you try to segregate their
298 * data and preserve locality.
299 *
300 * For example, say you start Firefox at the same time you're copying a
301 * bunch of files. Firefox will likely end up being fairly hot and stay in the
302 * cache a while, but the data you copied might not be; if you wrote all that
303 * data to the same buckets it'd get invalidated at the same time.
304 *
305 * Both of those tasks will be doing fairly random IO so we can't rely on
306 * detecting sequential IO to segregate their data, but going off of the task
307 * should be a sane heuristic.
308 */
309 static struct open_bucket *pick_data_bucket(struct cache_set *c,
310 const struct bkey *search,
311 struct task_struct *task,
312 struct bkey *alloc)
313 {
314 struct open_bucket *ret, *ret_task = NULL;
315
316 list_for_each_entry_reverse(ret, &c->data_buckets, list)
317 if (!bkey_cmp(&ret->key, search))
318 goto found;
319 else if (ret->last == task)
320 ret_task = ret;
321
322 ret = ret_task ?: list_first_entry(&c->data_buckets,
323 struct open_bucket, list);
324 found:
325 if (!ret->sectors_free && KEY_PTRS(alloc)) {
326 ret->sectors_free = c->sb.bucket_size;
327 bkey_copy(&ret->key, alloc);
328 bkey_init(alloc);
329 }
330
331 if (!ret->sectors_free)
332 ret = NULL;
333
334 return ret;
335 }
336
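/*
 * In short: prefer a bucket whose last key matches @search (i.e. this write is
 * sequential with the last write to that bucket), then a bucket last written
 * to by @task, then the least recently used open bucket.
 */
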
337 /*
338 * Allocates some space in the cache to write to, sets k to point to the newly
339 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
340 * end of the newly allocated space).
341 *
342 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
343 * sectors were actually allocated.
344 *
345 * If s->writeback is true, will not fail.
346 */
347 static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
348 struct search *s)
349 {
350 struct cache_set *c = s->op.c;
351 struct open_bucket *b;
352 BKEY_PADDED(key) alloc;
353 unsigned i;
354
355 /*
356 * We might have to allocate a new bucket, which we can't do with a
357 * spinlock held. So if we have to allocate, we drop the lock, allocate
358 * and then retry. KEY_PTRS() indicates whether alloc points to
359 * allocated bucket(s).
360 */
361
362 bkey_init(&alloc.key);
363 spin_lock(&c->data_bucket_lock);
364
365 while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
366 unsigned watermark = s->op.write_prio
367 ? WATERMARK_MOVINGGC
368 : WATERMARK_NONE;
369
370 spin_unlock(&c->data_bucket_lock);
371
372 if (bch_bucket_alloc_set(c, watermark, &alloc.key,
373 1, s->writeback))
374 return false;
375
376 spin_lock(&c->data_bucket_lock);
377 }
378
379 /*
380 * If we had to allocate, we might race and not need to allocate the
381 * second time we call pick_data_bucket(). If we allocated a bucket but
382 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
383 */
384 if (KEY_PTRS(&alloc.key))
385 __bkey_put(c, &alloc.key);
386
387 for (i = 0; i < KEY_PTRS(&b->key); i++)
388 EBUG_ON(ptr_stale(c, &b->key, i));
389
390 /* Set up the pointer to the space we're allocating: */
391
392 for (i = 0; i < KEY_PTRS(&b->key); i++)
393 k->ptr[i] = b->key.ptr[i];
394
395 sectors = min(sectors, b->sectors_free);
396
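/*
 * A key's offset points at the end of the range it covers, so advance it past
 * the space we just allocated; with KEY_SIZE() set below the key describes
 * [KEY_OFFSET - KEY_SIZE, KEY_OFFSET).
 */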
397 SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
398 SET_KEY_SIZE(k, sectors);
399 SET_KEY_PTRS(k, KEY_PTRS(&b->key));
400
401 /*
402 * Move b to the end of the lru, and keep track of what this bucket was
403 * last used for:
404 */
405 list_move_tail(&b->list, &c->data_buckets);
406 bkey_copy_key(&b->key, k);
407 b->last = s->task;
408
409 b->sectors_free -= sectors;
410
411 for (i = 0; i < KEY_PTRS(&b->key); i++) {
412 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
413
414 atomic_long_add(sectors,
415 &PTR_CACHE(c, &b->key, i)->sectors_written);
416 }
417
418 if (b->sectors_free < c->sb.block_size)
419 b->sectors_free = 0;
420
421 /*
422 * k takes refcounts on the buckets it points to until it's inserted
423 * into the btree, but if we're done with this bucket we just transfer
424 * get_data_bucket()'s refcount.
425 */
426 if (b->sectors_free)
427 for (i = 0; i < KEY_PTRS(&b->key); i++)
428 atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
429
430 spin_unlock(&c->data_bucket_lock);
431 return true;
432 }
433
434 static void bch_data_invalidate(struct closure *cl)
435 {
436 struct btree_op *op = container_of(cl, struct btree_op, cl);
437 struct search *s = container_of(op, struct search, op);
438 struct bio *bio = op->cache_bio;
439
440 pr_debug("invalidating %i sectors from %llu",
441 bio_sectors(bio), (uint64_t) bio->bi_sector);
442
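/*
 * Build keys with no pointers covering the bypassed range (at most 1 << 14
 * sectors per key); inserting them into the btree invalidates whatever the
 * cache held for that region.
 */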
443 while (bio_sectors(bio)) {
444 unsigned len = min(bio_sectors(bio), 1U << 14);
445
446 if (bch_keylist_realloc(&s->insert_keys, 0, op->c))
447 goto out;
448
449 bio->bi_sector += len;
450 bio->bi_size -= len << 9;
451
452 bch_keylist_add(&s->insert_keys,
453 &KEY(op->inode, bio->bi_sector, len));
454 }
455
456 op->insert_data_done = true;
457 bio_put(bio);
458 out:
459 continue_at(cl, bch_data_insert_keys, bcache_wq);
460 }
461
462 static void bch_data_insert_error(struct closure *cl)
463 {
464 struct btree_op *op = container_of(cl, struct btree_op, cl);
465 struct search *s = container_of(op, struct search, op);
466
467 /*
468 * Our data write just errored, which means we've got a bunch of keys to
469 * insert that point to data that wasn't successfully written.
470 *
471 * We don't have to insert those keys but we still have to invalidate
472 * that region of the cache - so, if we just strip off all the pointers
473 * from the keys we'll accomplish just that.
474 */
475
476 struct bkey *src = s->insert_keys.keys, *dst = s->insert_keys.keys;
477
478 while (src != s->insert_keys.top) {
479 struct bkey *n = bkey_next(src);
480
481 SET_KEY_PTRS(src, 0);
482 memmove(dst, src, bkey_bytes(src));
483
484 dst = bkey_next(dst);
485 src = n;
486 }
487
488 s->insert_keys.top = dst;
489
490 bch_data_insert_keys(cl);
491 }
492
493 static void bch_data_insert_endio(struct bio *bio, int error)
494 {
495 struct closure *cl = bio->bi_private;
496 struct btree_op *op = container_of(cl, struct btree_op, cl);
497 struct search *s = container_of(op, struct search, op);
498
499 if (error) {
500 /* TODO: We could try to recover from this. */
501 if (s->writeback)
502 s->error = error;
503 else if (s->write)
504 set_closure_fn(cl, bch_data_insert_error, bcache_wq);
505 else
506 set_closure_fn(cl, NULL, NULL);
507 }
508
509 bch_bbio_endio(op->c, bio, error, "writing data to cache");
510 }
511
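/*
 * Write out op->cache_bio: allocate some cache space, split off as much of the
 * bio as fits, build a key for that fragment, and repeat - bouncing to
 * bch_data_insert_keys() whenever the keylist needs flushing or once
 * everything has been submitted.
 */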
512 static void bch_data_insert_start(struct closure *cl)
513 {
514 struct btree_op *op = container_of(cl, struct btree_op, cl);
515 struct search *s = container_of(op, struct search, op);
516 struct bio *bio = op->cache_bio, *n;
517
518 if (op->bypass)
519 return bch_data_invalidate(cl);
520
521 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
522 set_gc_sectors(op->c);
523 bch_queue_gc(op->c);
524 }
525
526 /*
527 * Journal writes are marked REQ_FLUSH; if the original write was a
528 * flush, it'll wait on the journal write.
529 */
530 bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
531
532 do {
533 unsigned i;
534 struct bkey *k;
535 struct bio_set *split = s->d
536 ? s->d->bio_split : op->c->bio_split;
537
538 /* 1 for the device pointer and 1 for the checksum */
539 if (bch_keylist_realloc(&s->insert_keys,
540 1 + (op->csum ? 1 : 0),
541 op->c))
542 continue_at(cl, bch_data_insert_keys, bcache_wq);
543
544 k = s->insert_keys.top;
545 bkey_init(k);
546 SET_KEY_INODE(k, op->inode);
547 SET_KEY_OFFSET(k, bio->bi_sector);
548
549 if (!bch_alloc_sectors(k, bio_sectors(bio), s))
550 goto err;
551
552 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
553
554 n->bi_end_io = bch_data_insert_endio;
555 n->bi_private = cl;
556
557 if (s->writeback) {
558 SET_KEY_DIRTY(k, true);
559
560 for (i = 0; i < KEY_PTRS(k); i++)
561 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
562 GC_MARK_DIRTY);
563 }
564
565 SET_KEY_CSUM(k, op->csum);
566 if (KEY_CSUM(k))
567 bio_csum(n, k);
568
569 trace_bcache_cache_insert(k);
570 bch_keylist_push(&s->insert_keys);
571
572 n->bi_rw |= REQ_WRITE;
573 bch_submit_bbio(n, op->c, k, 0);
574 } while (n != bio);
575
576 op->insert_data_done = true;
577 continue_at(cl, bch_data_insert_keys, bcache_wq);
578 err:
579 /* bch_alloc_sectors() blocks if s->writeback = true */
580 BUG_ON(s->writeback);
581
582 /*
583 * But if it's not a writeback write we'd rather just bail out if
584 * there aren't any buckets ready to write to - it might take a while and
585 * we might be starving btree writes for gc or something.
586 */
587
588 if (s->write) {
589 /*
590 * Writethrough write: We can't complete the write until we've
591 * updated the index. But we don't want to delay the write while
592 * we wait for buckets to be freed up, so just invalidate the
593 * rest of the write.
594 */
595 op->bypass = true;
596 return bch_data_invalidate(cl);
597 } else {
598 /*
599 * From a cache miss, we can just insert the keys for the data
600 * we have written or bail out if we didn't do anything.
601 */
602 op->insert_data_done = true;
603 bio_put(bio);
604
605 if (!bch_keylist_empty(&s->insert_keys))
606 continue_at(cl, bch_data_insert_keys, bcache_wq);
607 else
608 closure_return(cl);
609 }
610 }
611
612 /**
613 * bch_data_insert - stick some data in the cache
614 *
615 * This is the starting point for any data to end up in a cache device; it could
616 * be from a normal write, or a writeback write, or a write to a flash-only
617 * volume - it's also used by the moving garbage collector to compact data in
618 * mostly empty buckets.
619 *
620 * It first writes the data to the cache, creating a list of keys to be inserted
621 * (if the data had to be fragmented there will be multiple keys); after the
622 * data is written it calls bch_journal, and after the keys have been added to
623 * the next journal write they're inserted into the btree.
624 *
625 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
626 * and op->inode is used for the key inode.
627 *
628 * If op->bypass is true, instead of inserting the data it invalidates the
629 * region of the cache represented by op->cache_bio and op->inode.
630 */
631 void bch_data_insert(struct closure *cl)
632 {
633 struct btree_op *op = container_of(cl, struct btree_op, cl);
634 struct search *s = container_of(op, struct search, op);
635
636 bch_keylist_init(&s->insert_keys);
637 bio_get(op->cache_bio);
638 bch_data_insert_start(cl);
639 }
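
/*
 * A minimal caller sketch (see cached_dev_write() and flash_dev_make_request()
 * below): set up op->c, op->inode, op->cache_bio and op->bypass/s->writeback
 * as needed, then
 *
 *	closure_call(&s->op.cl, bch_data_insert, NULL, &s->cl);
 *
 * the insert runs as a child closure of s->cl and releases it when done.
 */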
640
641 /* Common code for the make_request functions */
642
643 static void request_endio(struct bio *bio, int error)
644 {
645 struct closure *cl = bio->bi_private;
646
647 if (error) {
648 struct search *s = container_of(cl, struct search, cl);
649 s->error = error;
650 /* Only cache read errors are recoverable */
651 s->recoverable = false;
652 }
653
654 bio_put(bio);
655 closure_put(cl);
656 }
657
658 void bch_cache_read_endio(struct bio *bio, int error)
659 {
660 struct bbio *b = container_of(bio, struct bbio, bio);
661 struct closure *cl = bio->bi_private;
662 struct search *s = container_of(cl, struct search, cl);
663
664 /*
665 * If the bucket was reused while our bio was in flight, we might have
666 * read the wrong data. Set s->error but leave the bio's error alone, so the
667 * failure doesn't get counted against the cache device; we'll still reread
668 * the data from the backing device.
669 */
670
671 if (error)
672 s->error = error;
673 else if (ptr_stale(s->op.c, &b->key, 0)) {
674 atomic_long_inc(&s->op.c->cache_read_races);
675 s->error = -EINTR;
676 }
677
678 bch_bbio_endio(s->op.c, bio, error, "reading from cache");
679 }
680
681 static void bio_complete(struct search *s)
682 {
683 if (s->orig_bio) {
684 int cpu, rw = bio_data_dir(s->orig_bio);
685 unsigned long duration = jiffies - s->start_time;
686
687 cpu = part_stat_lock();
688 part_round_stats(cpu, &s->d->disk->part0);
689 part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
690 part_stat_unlock();
691
692 trace_bcache_request_end(s, s->orig_bio);
693 bio_endio(s->orig_bio, s->error);
694 s->orig_bio = NULL;
695 }
696 }
697
698 static void do_bio_hook(struct search *s)
699 {
700 struct bio *bio = &s->bio.bio;
701 memcpy(bio, s->orig_bio, sizeof(struct bio));
702
703 bio->bi_end_io = request_endio;
704 bio->bi_private = &s->cl;
705 atomic_set(&bio->bi_cnt, 3);
706 }
707
708 static void search_free(struct closure *cl)
709 {
710 struct search *s = container_of(cl, struct search, cl);
711 bio_complete(s);
712
713 if (s->op.cache_bio)
714 bio_put(s->op.cache_bio);
715
716 if (s->unaligned_bvec)
717 mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
718
719 closure_debug_destroy(cl);
720 mempool_free(s, s->d->c->search);
721 }
722
723 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
724 {
725 struct search *s;
726 struct bio_vec *bv;
727
728 s = mempool_alloc(d->c->search, GFP_NOIO);
729 memset(s, 0, offsetof(struct search, insert_keys));
730
731 __closure_init(&s->cl, NULL);
732
733 s->op.inode = d->id;
734 s->op.c = d->c;
735 s->d = d;
736 s->op.lock = -1;
737 s->task = current;
738 s->orig_bio = bio;
739 s->write = (bio->bi_rw & REQ_WRITE) != 0;
740 s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
741 s->recoverable = 1;
742 s->start_time = jiffies;
743 do_bio_hook(s);
744
745 if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
746 bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
747 memcpy(bv, bio_iovec(bio),
748 sizeof(struct bio_vec) * bio_segments(bio));
749
750 s->bio.bio.bi_io_vec = bv;
751 s->unaligned_bvec = 1;
752 }
753
754 return s;
755 }
756
757 static void btree_read_async(struct closure *cl)
758 {
759 struct btree_op *op = container_of(cl, struct btree_op, cl);
760
761 int ret = btree_root(search_recurse, op->c, op);
762
763 if (ret == -EAGAIN)
764 continue_at(cl, btree_read_async, bcache_wq);
765
766 closure_return(cl);
767 }
768
769 /* Cached devices */
770
771 static void cached_dev_bio_complete(struct closure *cl)
772 {
773 struct search *s = container_of(cl, struct search, cl);
774 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
775
776 search_free(cl);
777 cached_dev_put(dc);
778 }
779
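/*
 * Returns 0 if the cache set isn't considered congested, otherwise a positive
 * cutoff (in sectors): the more congested the cache, the smaller the cutoff.
 * check_should_bypass() bypasses the cache for IO at least this sequential,
 * and the get_random_int() term below adds a little jitter so the cutoff isn't
 * a hard edge.
 */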
780 unsigned bch_get_congested(struct cache_set *c)
781 {
782 int i;
783 long rand;
784
785 if (!c->congested_read_threshold_us &&
786 !c->congested_write_threshold_us)
787 return 0;
788
789 i = (local_clock_us() - c->congested_last_us) / 1024;
790 if (i < 0)
791 return 0;
792
793 i += atomic_read(&c->congested);
794 if (i >= 0)
795 return 0;
796
797 i += CONGESTED_MAX;
798
799 if (i > 0)
800 i = fract_exp_two(i, 6);
801
802 rand = get_random_int();
803 i -= bitmap_weight(&rand, BITS_PER_LONG);
804
805 return i > 0 ? i : 1;
806 }
807
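/*
 * Fold the task's sequential_io counter into its long-running average; with a
 * weight of 8 this works out to roughly avg = (7 * avg + io) / 8 (assuming the
 * usual ewma_add() definition in util.h).
 */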
808 static void add_sequential(struct task_struct *t)
809 {
810 ewma_add(t->sequential_io_avg,
811 t->sequential_io, 8, 0);
812
813 t->sequential_io = 0;
814 }
815
816 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
817 {
818 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
819 }
820
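/*
 * Decide whether this IO should skip the cache entirely. We always bypass
 * while detaching, when the cache is nearly full and for discards; beyond
 * that we bypass according to the cache mode, for IO that isn't aligned to
 * the cache's block size, for sufficiently sequential IO
 * (dc->sequential_cutoff) and when the cache device is congested.
 */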
821 static bool check_should_bypass(struct cached_dev *dc, struct search *s)
822 {
823 struct cache_set *c = s->op.c;
824 struct bio *bio = &s->bio.bio;
825 unsigned mode = cache_mode(dc, bio);
826 unsigned sectors, congested = bch_get_congested(c);
827
828 if (atomic_read(&dc->disk.detaching) ||
829 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
830 (bio->bi_rw & REQ_DISCARD))
831 goto skip;
832
833 if (mode == CACHE_MODE_NONE ||
834 (mode == CACHE_MODE_WRITEAROUND &&
835 (bio->bi_rw & REQ_WRITE)))
836 goto skip;
837
838 if (bio->bi_sector & (c->sb.block_size - 1) ||
839 bio_sectors(bio) & (c->sb.block_size - 1)) {
840 pr_debug("skipping unaligned io");
841 goto skip;
842 }
843
844 if (!congested && !dc->sequential_cutoff)
845 goto rescale;
846
847 if (!congested &&
848 mode == CACHE_MODE_WRITEBACK &&
849 (bio->bi_rw & REQ_WRITE) &&
850 (bio->bi_rw & REQ_SYNC))
851 goto rescale;
852
853 if (dc->sequential_merge) {
854 struct io *i;
855
856 spin_lock(&dc->io_lock);
857
858 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
859 if (i->last == bio->bi_sector &&
860 time_before(jiffies, i->jiffies))
861 goto found;
862
863 i = list_first_entry(&dc->io_lru, struct io, lru);
864
865 add_sequential(s->task);
866 i->sequential = 0;
867 found:
868 if (i->sequential + bio->bi_size > i->sequential)
869 i->sequential += bio->bi_size;
870
871 i->last = bio_end_sector(bio);
872 i->jiffies = jiffies + msecs_to_jiffies(5000);
873 s->task->sequential_io = i->sequential;
874
875 hlist_del(&i->hash);
876 hlist_add_head(&i->hash, iohash(dc, i->last));
877 list_move_tail(&i->lru, &dc->io_lru);
878
879 spin_unlock(&dc->io_lock);
880 } else {
881 s->task->sequential_io = bio->bi_size;
882
883 add_sequential(s->task);
884 }
885
886 sectors = max(s->task->sequential_io,
887 s->task->sequential_io_avg) >> 9;
888
889 if (dc->sequential_cutoff &&
890 sectors >= dc->sequential_cutoff >> 9) {
891 trace_bcache_bypass_sequential(s->orig_bio);
892 goto skip;
893 }
894
895 if (congested && sectors >= congested) {
896 trace_bcache_bypass_congested(s->orig_bio);
897 goto skip;
898 }
899
900 rescale:
901 bch_rescale_priorities(c, bio_sectors(bio));
902 return false;
903 skip:
904 bch_mark_sectors_bypassed(s, bio_sectors(bio));
905 return true;
906 }
907
908 /* Process reads */
909
910 static void cached_dev_cache_miss_done(struct closure *cl)
911 {
912 struct search *s = container_of(cl, struct search, cl);
913
914 if (s->op.insert_collision)
915 bch_mark_cache_miss_collision(s);
916
917 if (s->op.cache_bio) {
918 int i;
919 struct bio_vec *bv;
920
921 __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
922 __free_page(bv->bv_page);
923 }
924
925 cached_dev_bio_complete(cl);
926 }
927
928 static void cached_dev_read_error(struct closure *cl)
929 {
930 struct search *s = container_of(cl, struct search, cl);
931 struct bio *bio = &s->bio.bio;
932 struct bio_vec *bv;
933 int i;
934
935 if (s->recoverable) {
936 /* Retry from the backing device: */
937 trace_bcache_read_retry(s->orig_bio);
938
939 s->error = 0;
940 bv = s->bio.bio.bi_io_vec;
941 do_bio_hook(s);
942 s->bio.bio.bi_io_vec = bv;
943
944 if (!s->unaligned_bvec)
945 bio_for_each_segment(bv, s->orig_bio, i)
946 bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
947 else
948 memcpy(s->bio.bio.bi_io_vec,
949 bio_iovec(s->orig_bio),
950 sizeof(struct bio_vec) *
951 bio_segments(s->orig_bio));
952
953 /* XXX: invalidate cache */
954
955 closure_bio_submit(bio, cl, s->d);
956 }
957
958 continue_at(cl, cached_dev_cache_miss_done, NULL);
959 }
960
961 static void cached_dev_read_done(struct closure *cl)
962 {
963 struct search *s = container_of(cl, struct search, cl);
964 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
965
966 /*
967 * We had a cache miss; cache_bio now contains data ready to be inserted
968 * into the cache.
969 *
970 * First, we copy the data we just read from cache_bio's bounce buffers
971 * to the buffers the original bio pointed to:
972 */
973
974 if (s->op.cache_bio) {
975 bio_reset(s->op.cache_bio);
976 s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
977 s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
978 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
979 bch_bio_map(s->op.cache_bio, NULL);
980
981 bio_copy_data(s->cache_miss, s->op.cache_bio);
982
983 bio_put(s->cache_miss);
984 s->cache_miss = NULL;
985 }
986
987 if (verify(dc, &s->bio.bio) && s->recoverable)
988 bch_data_verify(s);
989
990 bio_complete(s);
991
992 if (s->op.cache_bio &&
993 !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
994 s->op.type = BTREE_REPLACE;
995 closure_call(&s->op.cl, bch_data_insert, NULL, cl);
996 }
997
998 continue_at(cl, cached_dev_cache_miss_done, NULL);
999 }
1000
1001 static void cached_dev_read_done_bh(struct closure *cl)
1002 {
1003 struct search *s = container_of(cl, struct search, cl);
1004 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
1005
1006 bch_mark_cache_accounting(s, !s->cache_miss, s->op.bypass);
1007 trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);
1008
1009 if (s->error)
1010 continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
1011 else if (s->op.cache_bio || verify(dc, &s->bio.bio))
1012 continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
1013 else
1014 continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
1015 }
1016
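/*
 * Handle the part of a read the cache couldn't satisfy: split off the missing
 * range (optionally extended by readahead), reserve it in the btree with
 * bch_btree_insert_check_key() so a racing write shows up later as an insert
 * collision, read it from the backing device and - if we managed to allocate a
 * bounce bio - arrange for the data to be inserted into the cache afterwards.
 */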
1017 static int cached_dev_cache_miss(struct btree *b, struct search *s,
1018 struct bio *bio, unsigned sectors)
1019 {
1020 int ret = 0;
1021 unsigned reada = 0;
1022 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
1023 struct bio *miss, *cache_bio;
1024
1025 if (s->cache_miss || s->op.bypass) {
1026 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
1027 if (miss == bio)
1028 s->op.lookup_done = true;
1029 goto out_submit;
1030 }
1031
1032 if (!(bio->bi_rw & REQ_RAHEAD) &&
1033 !(bio->bi_rw & REQ_META) &&
1034 s->op.c->gc_stats.in_use < CUTOFF_CACHE_READA)
1035 reada = min_t(sector_t, dc->readahead >> 9,
1036 bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
1037
1038 s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);
1039
1040 s->op.replace = KEY(s->op.inode, bio->bi_sector +
1041 s->cache_bio_sectors, s->cache_bio_sectors);
1042
1043 ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
1044 if (ret)
1045 return ret;
1046
1047 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
1048 if (miss == bio)
1049 s->op.lookup_done = true;
1050 else
1051 /* btree_search_recurse()'s btree iterator is no good anymore */
1052 ret = -EINTR;
1053
1054 cache_bio = bio_alloc_bioset(GFP_NOWAIT,
1055 DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
1056 dc->disk.bio_split);
1057 if (!cache_bio)
1058 goto out_submit;
1059
1060 cache_bio->bi_sector = miss->bi_sector;
1061 cache_bio->bi_bdev = miss->bi_bdev;
1062 cache_bio->bi_size = s->cache_bio_sectors << 9;
1063
1064 cache_bio->bi_end_io = request_endio;
1065 cache_bio->bi_private = &s->cl;
1066
1067 bch_bio_map(cache_bio, NULL);
1068 if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
1069 goto out_put;
1070
1071 s->cache_miss = miss;
1072 s->op.cache_bio = cache_bio;
1073 bio_get(cache_bio);
1074 closure_bio_submit(cache_bio, &s->cl, s->d);
1075
1076 return ret;
1077 out_put:
1078 bio_put(cache_bio);
1079 out_submit:
1080 miss->bi_end_io = request_endio;
1081 miss->bi_private = &s->cl;
1082 closure_bio_submit(miss, &s->cl, s->d);
1083 return ret;
1084 }
1085
1086 static void cached_dev_read(struct cached_dev *dc, struct search *s)
1087 {
1088 struct closure *cl = &s->cl;
1089
1090 closure_call(&s->op.cl, btree_read_async, NULL, cl);
1091 continue_at(cl, cached_dev_read_done_bh, NULL);
1092 }
1093
1094 /* Process writes */
1095
1096 static void cached_dev_write_complete(struct closure *cl)
1097 {
1098 struct search *s = container_of(cl, struct search, cl);
1099 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
1100
1101 up_read_non_owner(&dc->writeback_lock);
1102 cached_dev_bio_complete(cl);
1103 }
1104
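/*
 * Roughly three cases: if we're bypassing, the data only goes to the backing
 * device and the cached range is invalidated; for a writeback write the data
 * only goes to the cache (marked dirty), with any flush forwarded to the
 * backing device separately; otherwise (writethrough) the data goes to both,
 * with op->cache_bio cloned from the original bio.
 */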
1105 static void cached_dev_write(struct cached_dev *dc, struct search *s)
1106 {
1107 struct closure *cl = &s->cl;
1108 struct bio *bio = &s->bio.bio;
1109 struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
1110 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
1111
1112 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
1113
1114 down_read_non_owner(&dc->writeback_lock);
1115 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
1116 /*
1117 * We overlap with some dirty data undergoing background
1118 * writeback; force this write to writeback.
1119 */
1120 s->op.bypass = false;
1121 s->writeback = true;
1122 }
1123
1124 /*
1125 * Discards aren't _required_ to do anything, so skipping if
1126 * check_overlapping returned true is ok
1127 *
1128 * But check_overlapping drops dirty keys for which io hasn't started,
1129 * so we still want to call it.
1130 */
1131 if (bio->bi_rw & REQ_DISCARD)
1132 s->op.bypass = true;
1133
1134 if (should_writeback(dc, s->orig_bio,
1135 cache_mode(dc, bio),
1136 s->op.bypass)) {
1137 s->op.bypass = false;
1138 s->writeback = true;
1139 }
1140
1141 trace_bcache_write(s->orig_bio, s->writeback, s->op.bypass);
1142
1143 if (s->op.bypass) {
1144 s->op.cache_bio = s->orig_bio;
1145 bio_get(s->op.cache_bio);
1146
1147 if (!(bio->bi_rw & REQ_DISCARD) ||
1148 blk_queue_discard(bdev_get_queue(dc->bdev)))
1149 closure_bio_submit(bio, cl, s->d);
1150 } else if (s->writeback) {
1151 bch_writeback_add(dc);
1152 s->op.cache_bio = bio;
1153
1154 if (bio->bi_rw & REQ_FLUSH) {
1155 /* Also need to send a flush to the backing device */
1156 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
1157 dc->disk.bio_split);
1158
1159 flush->bi_rw = WRITE_FLUSH;
1160 flush->bi_bdev = bio->bi_bdev;
1161 flush->bi_end_io = request_endio;
1162 flush->bi_private = cl;
1163
1164 closure_bio_submit(flush, cl, s->d);
1165 }
1166 } else {
1167 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1168 dc->disk.bio_split);
1169
1170 closure_bio_submit(bio, cl, s->d);
1171 }
1172
1173 closure_call(&s->op.cl, bch_data_insert, NULL, cl);
1174 continue_at(cl, cached_dev_write_complete, NULL);
1175 }
1176
1177 static void cached_dev_nodata(struct closure *cl)
1178 {
1179 struct search *s = container_of(cl, struct search, cl);
1180 struct bio *bio = &s->bio.bio;
1181
1182 if (s->op.flush_journal)
1183 bch_journal_meta(s->op.c, cl);
1184
1185 /* If it's a flush, we send the flush to the backing device too */
1186 closure_bio_submit(bio, cl, s->d);
1187
1188 continue_at(cl, cached_dev_bio_complete, NULL);
1189 }
1190
1191 /* Cached devices - read & write stuff */
1192
1193 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1194 {
1195 struct search *s;
1196 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1197 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1198 int cpu, rw = bio_data_dir(bio);
1199
1200 cpu = part_stat_lock();
1201 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1202 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1203 part_stat_unlock();
1204
1205 bio->bi_bdev = dc->bdev;
1206 bio->bi_sector += dc->sb.data_offset;
1207
1208 if (cached_dev_get(dc)) {
1209 s = search_alloc(bio, d);
1210 trace_bcache_request_start(s, bio);
1211
1212 if (!bio->bi_size) {
1213 /*
1214 * can't call bch_journal_meta from under
1215 * generic_make_request
1216 */
1217 continue_at_nobarrier(&s->cl,
1218 cached_dev_nodata,
1219 bcache_wq);
1220 } else {
1221 s->op.bypass = check_should_bypass(dc, s);
1222
1223 if (rw)
1224 cached_dev_write(dc, s);
1225 else
1226 cached_dev_read(dc, s);
1227 }
1228 } else {
1229 if ((bio->bi_rw & REQ_DISCARD) &&
1230 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1231 bio_endio(bio, 0);
1232 else
1233 bch_generic_make_request(bio, &d->bio_split_hook);
1234 }
1235 }
1236
1237 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1238 unsigned int cmd, unsigned long arg)
1239 {
1240 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1241 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1242 }
1243
1244 static int cached_dev_congested(void *data, int bits)
1245 {
1246 struct bcache_device *d = data;
1247 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1248 struct request_queue *q = bdev_get_queue(dc->bdev);
1249 int ret = 0;
1250
1251 if (bdi_congested(&q->backing_dev_info, bits))
1252 return 1;
1253
1254 if (cached_dev_get(dc)) {
1255 unsigned i;
1256 struct cache *ca;
1257
1258 for_each_cache(ca, d->c, i) {
1259 q = bdev_get_queue(ca->bdev);
1260 ret |= bdi_congested(&q->backing_dev_info, bits);
1261 }
1262
1263 cached_dev_put(dc);
1264 }
1265
1266 return ret;
1267 }
1268
1269 void bch_cached_dev_request_init(struct cached_dev *dc)
1270 {
1271 struct gendisk *g = dc->disk.disk;
1272
1273 g->queue->make_request_fn = cached_dev_make_request;
1274 g->queue->backing_dev_info.congested_fn = cached_dev_congested;
1275 dc->disk.cache_miss = cached_dev_cache_miss;
1276 dc->disk.ioctl = cached_dev_ioctl;
1277 }
1278
1279 /* Flash backed devices */
1280
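/*
 * On a flash-only volume all data lives in the cache set, so a cache miss just
 * means the range has never been written - zero-fill it.
 */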
1281 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1282 struct bio *bio, unsigned sectors)
1283 {
1284 struct bio_vec *bv;
1285 int i;
1286
1287 /* Zero fill bio */
1288
1289 bio_for_each_segment(bv, bio, i) {
1290 unsigned j = min(bv->bv_len >> 9, sectors);
1291
1292 void *p = kmap(bv->bv_page);
1293 memset(p + bv->bv_offset, 0, j << 9);
1294 kunmap(bv->bv_page);
1295
1296 sectors -= j;
1297 }
1298
1299 bio_advance(bio, min(sectors << 9, bio->bi_size));
1300
1301 if (!bio->bi_size)
1302 s->op.lookup_done = true;
1303
1304 return 0;
1305 }
1306
1307 static void flash_dev_nodata(struct closure *cl)
1308 {
1309 struct search *s = container_of(cl, struct search, cl);
1310
1311 if (s->op.flush_journal)
1312 bch_journal_meta(s->op.c, cl);
1313
1314 continue_at(cl, search_free, NULL);
1315 }
1316
1317 static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1318 {
1319 struct search *s;
1320 struct closure *cl;
1321 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1322 int cpu, rw = bio_data_dir(bio);
1323
1324 cpu = part_stat_lock();
1325 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1326 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1327 part_stat_unlock();
1328
1329 s = search_alloc(bio, d);
1330 cl = &s->cl;
1331 bio = &s->bio.bio;
1332
1333 trace_bcache_request_start(s, bio);
1334
1335 if (!bio->bi_size) {
1336 /*
1337 * can't call bch_journal_meta from under
1338 * generic_make_request
1339 */
1340 continue_at_nobarrier(&s->cl,
1341 flash_dev_nodata,
1342 bcache_wq);
1343 } else if (rw) {
1344 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
1345 &KEY(d->id, bio->bi_sector, 0),
1346 &KEY(d->id, bio_end_sector(bio), 0));
1347
1348 s->op.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
1349 s->writeback = true;
1350 s->op.cache_bio = bio;
1351
1352 closure_call(&s->op.cl, bch_data_insert, NULL, cl);
1353 } else {
1354 closure_call(&s->op.cl, btree_read_async, NULL, cl);
1355 }
1356
1357 continue_at(cl, search_free, NULL);
1358 }
1359
1360 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1361 unsigned int cmd, unsigned long arg)
1362 {
1363 return -ENOTTY;
1364 }
1365
1366 static int flash_dev_congested(void *data, int bits)
1367 {
1368 struct bcache_device *d = data;
1369 struct request_queue *q;
1370 struct cache *ca;
1371 unsigned i;
1372 int ret = 0;
1373
1374 for_each_cache(ca, d->c, i) {
1375 q = bdev_get_queue(ca->bdev);
1376 ret |= bdi_congested(&q->backing_dev_info, bits);
1377 }
1378
1379 return ret;
1380 }
1381
1382 void bch_flash_dev_request_init(struct bcache_device *d)
1383 {
1384 struct gendisk *g = d->disk;
1385
1386 g->queue->make_request_fn = flash_dev_make_request;
1387 g->queue->backing_dev_info.congested_fn = flash_dev_congested;
1388 d->cache_miss = flash_dev_cache_miss;
1389 d->ioctl = flash_dev_ioctl;
1390 }
1391
1392 void bch_request_exit(void)
1393 {
1394 #ifdef CONFIG_CGROUP_BCACHE
1395 cgroup_unload_subsys(&bcache_subsys);
1396 #endif
1397 if (bch_search_cache)
1398 kmem_cache_destroy(bch_search_cache);
1399 }
1400
1401 int __init bch_request_init(void)
1402 {
1403 bch_search_cache = KMEM_CACHE(search, 0);
1404 if (!bch_search_cache)
1405 return -ENOMEM;
1406
1407 #ifdef CONFIG_CGROUP_BCACHE
1408 cgroup_load_subsys(&bcache_subsys);
1409 init_bch_cgroup(&bcache_default_cgroup);
1410
1411 cgroup_add_cftypes(&bcache_subsys, bch_files);
1412 #endif
1413 return 0;
1414 }