/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
#define CUTOFF_WRITEBACK	50
#define CUTOFF_WRITEBACK_SYNC	75
struct kmem_cache *bch_search_cache;

static void check_should_skip(struct cached_dev *, struct search *);
/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;

	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}
static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}
static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}
static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}
static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}
static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}
static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}
static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}
static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}
struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}
static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}
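/*
 * Walk every segment of @bio, accumulate a 64 bit crc over the data, and store
 * the result (with the high bit masked off) in the pointer slot just past the
 * last valid pointer of @k.
 */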
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */
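/*
 * bio_invalidate() doesn't write any data; it emits keys with no pointers
 * covering the bio's range, so that any stale data cached for that range is
 * invalidated when the keys go through the journal and into the btree.
 */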
static void bio_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&op->keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&op->keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
out:
	continue_at(cl, bch_journal, bcache_wq);
}
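/*
 * An open_bucket tracks a bucket currently being written to: which task last
 * wrote to it, how many sectors in it are still free, and the key describing
 * where its data lives.
 */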
struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}
int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}
/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
/*
 * Allocates some space in the cache to write to, and updates k to point to the
 * newly allocated space; KEY_SIZE(k) and KEY_OFFSET(k) are updated to point to
 * the end of the newly allocated space.
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	struct closure cl, *w = NULL;
	unsigned i;

	if (s->writeback) {
		closure_init_stack(&cl);
		w = &cl;
	}

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */
	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call find_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
static void bch_insert_data_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;

	while (src != op->keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		bkey_copy(dst, src);

		dst = bkey_next(dst);
		src = n;
	}

	op->keys.top = dst;

	bch_journal(cl);
}
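/*
 * Completion for the data writes issued by bch_insert_data_loop(): on an error
 * the keys for the failed write must not make it into the btree as-is, so
 * depending on the type of request the error is recorded or the keys are
 * stripped of their pointers via bch_insert_data_error().
 */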
static void bch_insert_data_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
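/*
 * bch_insert_data_loop() carves op->cache_bio into chunks that fit into the
 * space handed back by bch_alloc_sectors(), creates one key per chunk and
 * submits each chunk to the cache device; once the whole bio has been consumed
 * it continues on to the journal, or falls back to invalidating the region if
 * it can't allocate.
 */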
static void bch_insert_data_loop(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->skip)
		return bio_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
	}

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_journal, bcache_wq);

		k = op->keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
		if (!n) {
			__bkey_put(op->c, k);
			continue_at(cl, bch_insert_data_loop, bcache_wq);
		}

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		pr_debug("%s", pkey(k));
		bch_keylist_push(&op->keys);

		trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_journal, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->skip = true;
		return bio_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;

		if (!bch_keylist_empty(&op->keys))
			continue_at(cl, bch_journal, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_insert_data - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact data
 * in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have been
 * added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->skip is true, instead of inserting the data it invalidates the region
 * of the cache represented by op->cache_bio and op->inode.
 */
void bch_insert_data(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	bch_keylist_init(&op->keys);
	bio_get(op->cache_bio);
	bch_insert_data_loop(cl);
}
void bch_btree_insert_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (bch_btree_insert(op, op->c)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->insert_data_done) {
		bch_keylist_free(&op->keys);
		closure_return(cl);
	} else
		continue_at(cl, bch_insert_data_loop, bcache_wq);
}
/* Common code for the make_request functions */
static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}
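/*
 * Account the request in the block layer statistics and complete the original
 * bio with whatever error the search accumulated.
 */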
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}
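/*
 * Point the search's embedded bio at the same data as the original bio, but
 * with our own endio and private pointer, so bcache can modify and resubmit
 * its copy while the original bio stays untouched until bio_complete().
 */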
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io	= request_endio;
	bio->bi_private	= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, op.keys));

	__closure_init(&s->cl, NULL);

	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & REQ_FLUSH) != 0;
	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}
static void btree_read_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	int ret = btree_root(search_recurse, op->c, op);

	if (ret == -EAGAIN)
		continue_at(cl, btree_read_async, bcache_wq);

	closure_return(cl);
}
static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}
static void cached_dev_read_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
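/*
 * A read from the cache device failed. If the search is still recoverable we
 * reset the bio to cover the original request and reread everything from the
 * backing device; otherwise the error propagates up via s->error.
 */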
static void request_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* The cache read failed, but we can retry from the backing
		 * device.
		 */
		pr_debug("recovering at sector %llu",
			 (uint64_t) s->orig_bio->bi_sector);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		trace_bcache_read_retry(&s->bio.bio);
		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
	 * contains data ready to be inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->op.cache_bio) {
		struct bio_vec *src, *dst;
		unsigned src_offset, dst_offset, bytes;
		void *dst_ptr;

		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

		src = bio_iovec(s->op.cache_bio);
		dst = bio_iovec(s->cache_miss);
		src_offset = src->bv_offset;
		dst_offset = dst->bv_offset;
		dst_ptr = kmap(dst->bv_page);

		while (1) {
			if (dst_offset == dst->bv_offset + dst->bv_len) {
				kunmap(dst->bv_page);
				dst++;
				if (dst == bio_iovec_idx(s->cache_miss,
						s->cache_miss->bi_vcnt))
					break;

				dst_offset = dst->bv_offset;
				dst_ptr = kmap(dst->bv_page);
			}

			if (src_offset == src->bv_offset + src->bv_len) {
				src++;
				if (src == bio_iovec_idx(s->op.cache_bio,
						s->op.cache_bio->bi_vcnt))
					break;

				src_offset = src->bv_offset;
			}

			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
				    src->bv_offset + src->bv_len - src_offset);

			memcpy(dst_ptr + dst_offset,
			       page_address(src->bv_page) + src_offset,
			       bytes);

			src_offset	+= bytes;
			dst_offset	+= bytes;
		}

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->op.cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
		s->op.type = BTREE_REPLACE;
		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);

	if (s->error)
		continue_at_nobarrier(cl, request_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, request_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
}
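/*
 * Handle a cache miss: split off the missing range (plus optional readahead),
 * allocate a cache_bio that will later be used to insert the data into the
 * cache, and submit the read to the backing device.
 */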
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = 0;
	unsigned reada;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
	if (!miss)
		return -EAGAIN;

	if (miss == bio)
		s->op.lookup_done = true;

	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;

	if (s->cache_miss || s->op.skip)
		goto out_submit;

	if (miss != bio ||
	    (bio->bi_rw & REQ_RAHEAD) ||
	    (bio->bi_rw & REQ_META) ||
	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
		reada = 0;
	else {
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);

	if (!s->op.cache_bio)
		goto out_submit;

	s->op.cache_bio->bi_sector	= miss->bi_sector;
	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;

	s->op.cache_bio->bi_end_io	= request_endio;
	s->op.cache_bio->bi_private	= &s->cl;

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = -EINTR;
	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	bio_get(s->op.cache_bio);

	trace_bcache_cache_miss(s->orig_bio);
	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(s->op.cache_bio);
	s->op.cache_bio = NULL;
out_submit:
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
static void request_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	check_should_skip(dc, s);
	closure_call(&s->op.cl, btree_read_async, NULL, cl);

	continue_at(cl, request_read_done_bh, NULL);
}
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
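/*
 * Decide whether a write should be cached in writeback mode: only if the
 * device isn't detaching, the cache mode is writeback, and the cache isn't too
 * full (synchronous writes get a higher cutoff).
 */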
static bool should_writeback(struct cached_dev *dc, struct bio *bio)
{
	unsigned threshold = (bio->bi_rw & REQ_SYNC)
		? CUTOFF_WRITEBACK_SYNC
		: CUTOFF_WRITEBACK;

	return !atomic_read(&dc->disk.detaching) &&
		cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
		dc->disk.c->gc_stats.in_use < threshold;
}
static void request_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
	end = KEY(dc->disk.id, bio_end(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	check_should_skip(dc, s);
	down_read_non_owner(&dc->writeback_lock);

	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		s->op.skip	= false;
		s->writeback	= true;
	}

	if (bio->bi_rw & REQ_DISCARD)
		goto skip;

	if (s->op.skip)
		goto skip;

	if (should_writeback(dc, s->orig_bio))
		s->writeback = true;

	if (!s->writeback) {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		trace_bcache_writethrough(s->orig_bio);
		closure_bio_submit(bio, cl, s->d);
	} else {
		s->op.cache_bio = bio;
		trace_bcache_writeback(s->orig_bio);
		bch_writeback_add(dc, bio_sectors(bio));
	}
out:
	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
skip:
	s->op.skip = true;
	s->op.cache_bio = s->orig_bio;
	bio_get(s->op.cache_bio);
	trace_bcache_write_skip(s->orig_bio);

	if ((bio->bi_rw & REQ_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		goto out;

	closure_bio_submit(bio, cl, s->d);
	goto out;
}
static void request_nodata(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;

	if (bio->bi_rw & REQ_DISCARD) {
		request_write(dc, s);
		return;
	}

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
/* Cached devices - read & write stuff */
int bch_get_congested(struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;

	i += atomic_read(&c->congested);

	return i <= 0 ? 1 : fract_exp_two(i, 6);
}
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}
static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
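/*
 * Decide whether this request should bypass the cache entirely: skip when the
 * device is detaching, the cache is nearly full, the IO is misaligned, the
 * cache mode says so, or the request looks like part of a large sequential
 * stream (optionally tracked across requests via a small hash of recent IOs).
 */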
static void check_should_skip(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;

	long rand;
	int cutoff = bch_get_congested(c);
	unsigned mode = cache_mode(dc, bio);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector   & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!cutoff) {
		cutoff = dc->sequential_cutoff >> 9;

		if (!cutoff)
			goto rescale;

		if (mode == CACHE_MODE_WRITEBACK &&
		    (bio->bi_rw & REQ_WRITE) &&
		    (bio->bi_rw & REQ_SYNC))
			goto rescale;
	}

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	rand = get_random_int();
	cutoff -= bitmap_weight(&rand, BITS_PER_LONG);

	if (cutoff <= (int) (max(s->task->sequential_io,
				 s->task->sequential_io_avg) >> 9))
		goto skip;

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	s->op.skip = true;
}
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio_has_data(bio))
			request_nodata(dc, s);
		else if (rw)
			request_write(dc, s);
		else
			request_read(dc, s);
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */
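/*
 * Flash-only volumes have no backing device, so a "cache miss" simply means
 * the range has never been written: zero-fill the relevant part of the bio
 * instead of reading anything.
 */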
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	while (bio->bi_idx != bio->bi_vcnt) {
		struct bio_vec *bv = bio_iovec(bio);
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		bv->bv_len	-= j << 9;
		bv->bv_offset	+= j << 9;

		if (bv->bv_len)
			return 0;

		bio->bi_sector	+= j;
		bio->bi_size	-= j << 9;

		bio->bi_idx++;
		sectors		-= j;
	}

	s->op.lookup_done = true;

	return 0;
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (bio_has_data(bio) && !rw) {
		closure_call(&s->op.cl, btree_read_async, NULL, cl);
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					     &KEY(d->id, bio->bi_sector, 0),
					     &KEY(d->id, bio_end(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	} else {
		/* No data - probably a cache flush */
		if (s->op.flush_journal)
			bch_journal_meta(s->op.c, cl);
	}

	continue_at(cl, search_free, NULL);
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}
*d
)
1381 struct gendisk
*g
= d
->disk
;
1383 g
->queue
->make_request_fn
= flash_dev_make_request
;
1384 g
->queue
->backing_dev_info
.congested_fn
= flash_dev_congested
;
1385 d
->cache_miss
= flash_dev_cache_miss
;
1386 d
->ioctl
= flash_dev_ioctl
;
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}
int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}