/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
static void journal_read_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        closure_put(cl);
}
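/*
 * Read one journal bucket into the cache set's scratch jset buffer, in chunks
 * of up to PAGE_SECTORS * 8 sectors, validating each entry's magic, size and
 * checksum before splicing it into @list ordered by sequence number.
 */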
static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               struct btree_op *op, unsigned bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        unsigned len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        pr_debug("reading %llu", (uint64_t) bucket);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned, left, PAGE_SECTORS * 8);

                bio->bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &op->cl;
                bch_bio_map(bio, data);

                closure_bio_submit(bio, &op->cl, ca);
                closure_sync(&op->cl);
                /* This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */
                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(ca->set))
                                return ret;

                        if (bytes > left << 9)
                                return ret;

                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j))
                                return ret;

                        blocks = set_blocks(j, ca->set);

                        /*
                         * Drop entries from the front of the list that are
                         * older than this jset's last_seq - they're no longer
                         * needed for replay.
                         */
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        list_add(&i->list, where);
                        ret = 1;

                        ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}
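/*
 * Find and read every journal entry on every cache device: buckets are probed
 * in golden ratio hash order to quickly hit one containing valid entries, with
 * a linear scan as fallback, then a binary search plus a reverse sweep to pick
 * up the remaining buckets around the newest one found.
 */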
int bch_journal_read(struct cache_set *c, struct list_head *list,
                     struct btree_op *op)
{
#define read_bucket(b)                                                  \
        ({                                                              \
                int ret = journal_read_bucket(ca, list, op, b);         \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })

        struct cache *ca;
        unsigned iter;
        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
                unsigned i, l, r, m;
                uint64_t seq;

                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);
                /*
                 * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
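                /*
                 * 2654435769 is 2^32 / golden ratio (Fibonacci hashing), so
                 * i * 2654435769U mod njournal_buckets scatters successive
                 * probes roughly evenly across the journal buckets.
                 */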
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;

                        if (test_bit(l, bitmap))
                                break;

                        if (read_bucket(l))
                                goto bsearch;
                }
                /*
                 * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");
                for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
                     l < ca->sb.njournal_buckets;
                     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;

                if (list_empty(list))
                        continue;
bsearch:
                m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
                pr_debug("starting binary search, l %u r %u", l, r);
                while (l + 1 < r) {
                        seq = list_entry(list->prev, struct journal_replay,
                                         list)->j.seq;

                        m = (l + r) >> 1;
                        read_bucket(m);

                        if (seq != list_entry(list->prev, struct journal_replay,
                                              list)->j.seq)
                                l = m;
                        else
                                r = m;
                }
                /*
                 * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
                pr_debug("finishing up: m %u njournal_buckets %u",
                         m, ca->sb.njournal_buckets);
                l = m;
                while (1) {
                        if (!l--)
                                l = ca->sb.njournal_buckets - 1;

                        if (l == m)
                                break;

                        if (test_bit(l, bitmap))
                                continue;

                        if (!read_bucket(l))
                                break;
                }

                seq = 0;
                for (i = 0; i < ca->sb.njournal_buckets; i++)
                        if (ja->seq[i] > seq) {
                                seq = ja->seq[i];
                                ja->cur_idx = ja->discard_idx =
                                        ja->last_idx = i;
                        }
        }
        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}
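/*
 * Mark every key found in the journal entries on @list (the same marking
 * garbage collection would do), bumping bucket pin counts and priorities so
 * the buckets those keys point into aren't reused before the keys are
 * replayed, and set up journal.pin refcounts for each entry.
 */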
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p;
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;
        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */
        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }
                for (k = i->j.start;
                     k < end(&i->j);
                     k = bkey_next(k)) {
                        unsigned j; /* note: shadows the struct journal pointer above */

                        for (j = 0; j < KEY_PTRS(k); j++) {
                                struct bucket *g = PTR_BUCKET(c, k, j);
                                atomic_inc(&g->pin);

                                if (g->prio == BTREE_PRIO &&
                                    !ptr_stale(c, k, j))
                                        g->prio = INITIAL_PRIO;
                        }

                        __bch_btree_mark_key(c, 0, k);
                }
        }
}
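/*
 * Re-insert the journalled keys into the btree, in exactly the order they
 * appear in the journal; any gap in the sequence numbers is reported as a
 * cache set error.
 */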
int bch_journal_replay(struct cache_set *s, struct list_head *list,
                       struct btree_op *op)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
        struct keylist keylist;

        bch_keylist_init(&keylist);
        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                cache_set_err_on(n != i->j.seq, s,
                        "bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
                        n, i->j.seq - 1, start, end);

                for (k = i->j.start;
                     k < end(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bkey_copy(keylist.top, k);
                        bch_keylist_push(&keylist);

                        op->journal = i->pin;

                        ret = bch_btree_insert(op, s, &keylist);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&keylist));
                        keys++;
                }

                if (i->pin)
                        atomic_dec(i->pin);
                n = i->j.seq + 1;
                entries++;
        }
        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);
        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }
err:
        closure_sync(&op->cl);
        return ret;
}
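/* Journalling */

/*
 * Write out the btree node that currently references the oldest journal
 * entry, so that the journal pin it holds can be released and the entry's
 * bucket reused.
 */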
static void btree_flush_write(struct cache_set *c)
{
        /*
         * Try to find the btree node with that references the oldest journal
         * entry, best is our current candidate and is locked if non NULL:
         */
        struct btree *b, *best;
        unsigned i;
retry:
        best = NULL;
        for_each_cached_btree(b, c, i)
                if (btree_current_write(b)->journal) {
                        if (!best)
                                best = b;
                        else if (journal_pin_cmp(c,
                                                 btree_current_write(best),
                                                 btree_current_write(b))) {
                                best = b;
                        }
                }

        b = best;
        if (b) {
                rw_lock(true, b, b->level);

                if (!btree_current_write(b)->journal) {
                        rw_unlock(true, b);
                        /* We raced */
                        goto retry;
                }

                bch_btree_node_write(b, NULL);
                rw_unlock(true, b);
        }
}
#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)
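/*
 * journal.pin holds one refcount per open journal entry, newest at the back,
 * so the oldest entry still pinned has sequence number
 * seq - fifo_used(&pin) + 1; that is what last_seq() computes.
 */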
static void journal_discard_endio(struct bio *bio, int error)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        /* Wake up anyone waiting for journal space */
        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(0, &ja->discard_bio);
}
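/*
 * Discard the next journal bucket that is no longer needed, if the device has
 * discards enabled; discard_in_flight is a small per-device state machine
 * (READY -> IN_FLIGHT -> DONE) so only one discard is ever outstanding.
 */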
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);
                /* fallthrough */

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio);
                bio->bi_sector          = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
                bio->bi_size            = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                schedule_work(&ja->discard_work);
        }
}
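/*
 * Free up journal space: pop pins whose refcount has dropped to zero, advance
 * each device's last_idx past buckets older than the oldest pinned sequence
 * number, kick off discards, and if the current bucket is exhausted pick the
 * next bucket(s) to write to.
 */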
static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca;
        uint64_t last_seq;
        unsigned iter, n = 0;
        atomic_t p;
        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */
        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (ja->last_idx != ja->cur_idx &&
                       ja->seq[ja->last_idx] < last_seq)
                        ja->last_idx = (ja->last_idx + 1) %
                                ca->sb.njournal_buckets;
        }
        for_each_cache(ca, c, iter)
                do_journal_discard(ca);

        if (c->journal.blocks_free)
                return;
        /* XXX: Sort by free journal space */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

                /* No space available on this device */
                if (next == ja->discard_idx)
                        continue;

                ja->cur_idx = next;
                k->ptr[n++] = PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }

        bkey_init(k);
        SET_KEY_PTRS(k, n);
        if (n)
                c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;

        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}
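/*
 * Switch to the other in-memory journal_write buffer and open a new journal
 * entry: push a fresh refcount onto journal.pin, bump the sequence number and
 * reset the new buffer's key count.
 */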
void bch_journal_next(struct journal *j)
{
        atomic_t p;

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];
        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio, int error)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(error, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);
        continue_at_nobarrier(cl, journal_write, system_wq);
}
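/*
 * Build the current journal entry into a jset (btree root, uuid bucket, prio
 * bucket pointers, checksum) and submit one copy to every cache device the
 * journal key points at. Called with journal.lock held; the lock is dropped
 * before the bios are submitted.
 */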
static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

        struct bio *bio;
        struct bio_list list;
        bio_list_init(&list);
        if (!w->need_write) {
                /*
                 * XXX: have to unlock closure before we unlock journal lock,
                 * else we race with bch_journal(). But this way we race
                 * against cache set unregister. Doh.
                 */
                set_closure_fn(cl, NULL, NULL);
                closure_sub(cl, CLOSURE_RUNNING + 1);
                spin_unlock(&c->journal.lock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, system_wq);
        }
        c->journal.blocks_free -= set_blocks(w->data, c);

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        for_each_cache(ca, c, i)
                w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

        w->data->magic          = jset_magic(c);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);
        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = PTR_CACHE(c, k, i);
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio->bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
                bio->bi_size    = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }
        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);
        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(bio, cl, c->cache[0]);

        continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}
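/*
 * Mark the current journal entry as needing to be written and start a write
 * if we can take the journal io closure; if we can't, a write is already in
 * flight and will pick up need_write when it finishes, so just drop the lock.
 */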
static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (closure_trylock(cl, &c->cl))
                journal_write_unlocked(cl);
        else
                spin_unlock(&c->journal.lock);
}
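/*
 * Wait until the current journal entry has room for @nkeys more keys,
 * flushing the journal (if only the entry is full) or reclaiming space and
 * flushing dirty btree nodes (if the whole journal is full) as needed.
 * Returns the open journal_write with journal.lock held.
 */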
static struct journal_write *journal_wait_for_write(struct cache_set *c,
                                                    unsigned nkeys)
{
        size_t sectors;
        struct closure cl;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);

        while (1) {
                struct journal_write *w = c->journal.cur;
                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       c) * c->sb.block_size;

                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;
                /* XXX: tracepoint */
                if (!journal_full(&c->journal)) {
                        trace_bcache_journal_entry_full(c);

                        /*
                         * XXX: If we were inserting so many keys that they
                         * won't fit in an _empty_ journal write, we'll
                         * deadlock. For now, handle this in
                         * bch_keylist_realloc() - but something to think about.
                         */
                        BUG_ON(!w->data->keys);

                        closure_wait(&w->wait, &cl);
                        journal_try_write(c); /* unlocks */
                } else {
                        trace_bcache_journal_full(c);

                        closure_wait(&c->journal.wait, &cl);
                        journal_reclaim(c);

                        spin_unlock(&c->journal.lock);

                        btree_flush_write(c);
                }

                closure_sync(&cl);
                spin_lock(&c->journal.lock);
        }
}
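/*
 * Delayed-work callback: flushes whatever has accumulated in the current
 * journal entry once the journal_delay_ms timer armed by bch_journal() fires.
 */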
static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work);
        spin_lock(&c->journal.lock);
        journal_try_write(c);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
atomic_t *bch_journal(struct cache_set *c,
                      struct keylist *keys,
                      struct closure *parent)
{
        struct journal_write *w;
        atomic_t *ret;

        if (!CACHE_SYNC(&c->sb))
                return NULL;
        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);

        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);
        if (parent) {
                closure_wait(&w->wait, parent);
                journal_try_write(c);
        } else if (!w->need_write) {
                schedule_delayed_work(&c->journal.work,
                                      msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
}
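/*
 * Journal an empty keylist: forces the current journal entry to be written,
 * with @cl woken when it is, without inserting any keys.
 */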
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct keylist keys;
        atomic_t *ref;

        bch_keylist_init(&keys);

        ref = bch_journal(c, &keys, cl);
        if (ref)
                atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        closure_init_unlocked(&j->io);
        spin_lock_init(&j->lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;

        return 0;
}