/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);
/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;

	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);

	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}
static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif /* CONFIG_CGROUP_BCACHE */
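
/*
 * The per-cgroup cache_mode and verify settings above take precedence over
 * the per-device defaults; the helpers below fall back to the values from
 * the backing device superblock when no cgroup override is set.
 */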
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;

	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
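
/*
 * The checksum is stored in the bkey itself, in the u64 slot just past the
 * last pointer - which is why bch_data_insert_start() reserves one extra u64
 * in the keylist when op->csum is set.
 */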
/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}
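
/*
 * When a write bypasses the cache we still have to invalidate whatever is
 * cached for that range: bch_data_invalidate() emits keys with no pointers
 * covering the bio, and inserting them overwrites (invalidates) any older
 * cached extents they overlap.
 */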
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
			goto out;

		bio->bi_sector	+= sectors;
		bio->bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}
static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile and
	 * we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If s->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by s->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
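
/*
 * bch_get_congested() returns 0 when congestion tracking is disabled or the
 * cache isn't currently congested; otherwise it returns a small, randomly
 * jittered positive value that check_should_bypass() compares against the
 * size of the current sequential stream (in sectors).
 */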
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
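
/*
 * check_should_bypass() is the cache admission policy: I/O goes straight to
 * the backing device when the device is detaching, the cache is nearly full,
 * the I/O is misaligned, the stream looks sequential (past
 * dc->sequential_cutoff), or the cache device itself is congested.
 */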
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		task->sequential_io = bio->bi_size;

		add_sequential(task);
	}

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;

	unsigned		insert_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;
	unsigned		write:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}
/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
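
/*
 * cache_lookup() walks the extents covering this bio via bch_btree_map_keys();
 * -EAGAIN means the lookup would have blocked, so it reschedules itself on
 * bcache_wq and retries.
 */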
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;

	int ret = bch_btree_map_keys(&s->op, s->iop.c,
				     &KEY(s->iop.inode, bio->bi_sector, 0),
				     cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct search *s;
	struct bio_vec *bv;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, iop.insert_keys));

	__closure_init(&s->cl, NULL);

	s->iop.inode		= d->id;
	s->iop.c		= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}
/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}
/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_sector	= s->cache_miss->bi_sector;
		s->iop.bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->iop.bio->bi_size	= s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->unaligned_bvec)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
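
/*
 * cached_dev_cache_miss() handles the part of a read the cache couldn't
 * satisfy: it reads the missing range (optionally extended by readahead) from
 * the backing device into a bounce bio, and sets up iop.replace_key so the
 * data is only inserted if nothing else has written that range in the
 * meantime.
 */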
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_sector	= miss->bi_sector;
	cache_bio->bi_bdev	= miss->bi_bdev;
	cache_bio->bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}
/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
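
/*
 * cached_dev_write() picks one of three paths per bio: bypass (data goes only
 * to the backing device and the cached range is invalidated), writeback (data
 * goes only to the cache and is flushed to the backing device later), or
 * writethrough (data is written to both in parallel).
 */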
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
					      dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
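
/*
 * cached_dev_make_request() is the make_request_fn for the exported bcache
 * block device: it accounts the I/O, remaps the bio to the backing device
 * (adding dc->sb.data_offset), and dispatches to the read, write, or no-data
 * path.
 */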
/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors -= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					     &KEY(d->id, bio->bi_sector, 0),
					     &KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}
int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}