/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
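
/*
 * Illustrative note (not part of the original file): the 8 bit gen counter
 * is what makes freeing cheap.  Freeing a bucket just increments
 * bucket->gen; a pointer that still carries the old gen is then "stale",
 * roughly:
 *
 *	stale = gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 *
 * so stale keys are skipped by lookups and reaped by garbage collection
 * instead of being deleted one by one.
 */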

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 * Check for bad keys in replay
 * Propagate barriers
 * Refcount journal entries in journal_replay
 *
 * Garbage collection:
 * Finish incremental gc
 * Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)						\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})
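
/*
 * Illustrative sketch (not from the original file): how a recursive
 * operation typically uses btree() and btree_root().  Interior nodes walk
 * their keys and recurse; a leaf that discovers it needs write locks on
 * its ancestors (e.g. because it will have to split) raises op->lock and
 * returns -EINTR, and btree_root() retries the traversal with the
 * stronger locks.  The function names below are hypothetical.
 */
#if 0	/* example only */
static int bch_btree_example(struct btree *b, struct btree_op *op)
{
	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;
		int ret;

		bch_btree_iter_init(&b->keys, &iter, NULL);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(example, k, b, op);
			if (ret)
				return ret;
		}

		return 0;
	}

	/* Leaf: need to modify ancestors but only hold the leaf lock? */
	if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;		/* btree_root() will retry */
	}

	return 0;			/* leaf work would go here */
}

/* caller: bch_btree_op_init(&op, 0); btree_root(example, c, &op); */
#endif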

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_rw = REQ_META|READ_SYNC;
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io = btree_node_read_endio;
	bio->bi_private = &cl;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}
324
325static void btree_complete_write(struct btree *b, struct btree_write *w)
326{
327 if (w->prio_blocked &&
328 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
119ba0f8 329 wake_up_allocators(b->c);
cafe5635
KO
330
331 if (w->journal) {
332 atomic_dec_bug(w->journal);
333 __closure_wake_up(&b->c->journal.wait);
334 }
335
cafe5635
KO
336 w->prio_blocked = 0;
337 w->journal = NULL;
cafe5635
KO
338}
339
cb7a583e
KO
340static void btree_node_write_unlock(struct closure *cl)
341{
342 struct btree *b = container_of(cl, struct btree, io);
343
344 up(&b->io_mutex);
345}
346
57943511 347static void __btree_node_write_done(struct closure *cl)
cafe5635 348{
cb7a583e 349 struct btree *b = container_of(cl, struct btree, io);
cafe5635
KO
350 struct btree_write *w = btree_prev_write(b);
351
352 bch_bbio_free(b->bio, b->c);
353 b->bio = NULL;
354 btree_complete_write(b, w);
355
356 if (btree_node_dirty(b))
56b30770 357 schedule_delayed_work(&b->work, 30 * HZ);
cafe5635 358
cb7a583e 359 closure_return_with_destructor(cl, btree_node_write_unlock);
cafe5635
KO
360}
361
57943511 362static void btree_node_write_done(struct closure *cl)
cafe5635 363{
cb7a583e 364 struct btree *b = container_of(cl, struct btree, io);
cafe5635
KO
365 struct bio_vec *bv;
366 int n;
367
7988613b 368 bio_for_each_segment_all(bv, b->bio, n)
cafe5635
KO
369 __free_page(bv->bv_page);
370
57943511 371 __btree_node_write_done(cl);
cafe5635
KO
372}
373
57943511
KO
374static void btree_node_write_endio(struct bio *bio, int error)
375{
376 struct closure *cl = bio->bi_private;
cb7a583e 377 struct btree *b = container_of(cl, struct btree, io);
57943511
KO
378
379 if (error)
380 set_btree_node_io_error(b);
381
382 bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
383 closure_put(cl);
384}
385
386static void do_btree_node_write(struct btree *b)
cafe5635 387{
cb7a583e 388 struct closure *cl = &b->io;
ee811287 389 struct bset *i = btree_bset_last(b);
cafe5635
KO
390 BKEY_PADDED(key) k;
391
392 i->version = BCACHE_BSET_VERSION;
393 i->csum = btree_csum_set(b, i);
394
57943511
KO
395 BUG_ON(b->bio);
396 b->bio = bch_bbio_alloc(b->c);
397
398 b->bio->bi_end_io = btree_node_write_endio;
faadf0c9 399 b->bio->bi_private = cl;
e49c7c37 400 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
ee811287 401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
169ef1cf 402 bch_bio_map(b->bio, i);
cafe5635 403
e49c7c37
KO
404 /*
405 * If we're appending to a leaf node, we don't technically need FUA -
406 * this write just needs to be persisted before the next journal write,
407 * which will be marked FLUSH|FUA.
408 *
409 * Similarly if we're writing a new btree root - the pointer is going to
410 * be in the next journal entry.
411 *
412 * But if we're writing a new btree node (that isn't a root) or
413 * appending to a non leaf btree node, we need either FUA or a flush
414 * when we write the parent with the new pointer. FUA is cheaper than a
415 * flush, and writes appending to leaf nodes aren't blocking anything so
416 * just make all btree node writes FUA to keep things sane.
417 */
418
cafe5635 419 bkey_copy(&k.key, &b->key);
ee811287 420 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
a85e968e 421 bset_sector_offset(&b->keys, i));
cafe5635 422
501d52a9 423 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
cafe5635
KO
424 int j;
425 struct bio_vec *bv;
426 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
427
7988613b 428 bio_for_each_segment_all(bv, b->bio, j)
cafe5635
KO
429 memcpy(page_address(bv->bv_page),
430 base + j * PAGE_SIZE, PAGE_SIZE);
431
cafe5635
KO
432 bch_submit_bbio(b->bio, b->c, &k.key, 0);
433
57943511 434 continue_at(cl, btree_node_write_done, NULL);
cafe5635
KO
435 } else {
436 b->bio->bi_vcnt = 0;
169ef1cf 437 bch_bio_map(b->bio, i);
cafe5635 438
cafe5635
KO
439 bch_submit_bbio(b->bio, b->c, &k.key, 0);
440
441 closure_sync(cl);
cb7a583e 442 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
cafe5635
KO
443 }
444}
445
2a285686 446void __bch_btree_node_write(struct btree *b, struct closure *parent)
cafe5635 447{
ee811287 448 struct bset *i = btree_bset_last(b);
cafe5635 449
2a285686
KO
450 lockdep_assert_held(&b->write_lock);
451
c37511b8
KO
452 trace_bcache_btree_write(b);
453
cafe5635 454 BUG_ON(current->bio_list);
57943511
KO
455 BUG_ON(b->written >= btree_blocks(b));
456 BUG_ON(b->written && !i->keys);
ee811287 457 BUG_ON(btree_bset_first(b)->seq != i->seq);
dc9d98d6 458 bch_check_keys(&b->keys, "writing");
cafe5635 459
cafe5635
KO
460 cancel_delayed_work(&b->work);
461
57943511 462 /* If caller isn't waiting for write, parent refcount is cache set */
cb7a583e
KO
463 down(&b->io_mutex);
464 closure_init(&b->io, parent ?: &b->c->cl);
57943511 465
cafe5635
KO
466 clear_bit(BTREE_NODE_dirty, &b->flags);
467 change_bit(BTREE_NODE_write_idx, &b->flags);
468
57943511 469 do_btree_node_write(b);
cafe5635 470
ee811287 471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
cafe5635
KO
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
473
a85e968e 474 b->written += set_blocks(i, block_bytes(b->c));
2a285686 475}
a85e968e 476
2a285686
KO
477void bch_btree_node_write(struct btree *b, struct closure *parent)
478{
479 unsigned nsets = b->keys.nsets;
480
481 lockdep_assert_held(&b->lock);
482
483 __bch_btree_node_write(b, parent);
cafe5635 484
78b77bf8
KO
485 /*
486 * do verify if there was more than one set initially (i.e. we did a
487 * sort) and we sorted down to a single set:
488 */
2a285686 489 if (nsets && !b->keys.nsets)
78b77bf8
KO
490 bch_btree_verify(b);
491
2a285686 492 bch_btree_init_next(b);
cafe5635
KO
493}
494
f269af5a
KO
495static void bch_btree_node_write_sync(struct btree *b)
496{
497 struct closure cl;
498
499 closure_init_stack(&cl);
2a285686
KO
500
501 mutex_lock(&b->write_lock);
f269af5a 502 bch_btree_node_write(b, &cl);
2a285686
KO
503 mutex_unlock(&b->write_lock);
504
f269af5a
KO
505 closure_sync(&cl);
506}
507
57943511 508static void btree_node_write_work(struct work_struct *w)
cafe5635
KO
509{
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
511
2a285686 512 mutex_lock(&b->write_lock);
cafe5635 513 if (btree_node_dirty(b))
2a285686
KO
514 __bch_btree_node_write(b, NULL);
515 mutex_unlock(&b->write_lock);
cafe5635
KO
516}
517
c18536a7 518static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
cafe5635 519{
ee811287 520 struct bset *i = btree_bset_last(b);
cafe5635
KO
521 struct btree_write *w = btree_current_write(b);
522
2a285686
KO
523 lockdep_assert_held(&b->write_lock);
524
57943511
KO
525 BUG_ON(!b->written);
526 BUG_ON(!i->keys);
cafe5635 527
57943511 528 if (!btree_node_dirty(b))
56b30770 529 schedule_delayed_work(&b->work, 30 * HZ);
cafe5635 530
57943511 531 set_btree_node_dirty(b);
cafe5635 532
c18536a7 533 if (journal_ref) {
cafe5635 534 if (w->journal &&
c18536a7 535 journal_pin_cmp(b->c, w->journal, journal_ref)) {
cafe5635
KO
536 atomic_dec_bug(w->journal);
537 w->journal = NULL;
538 }
539
540 if (!w->journal) {
c18536a7 541 w->journal = journal_ref;
cafe5635
KO
542 atomic_inc(w->journal);
543 }
544 }
545
cafe5635 546 /* Force write if set is too big */
57943511
KO
547 if (set_bytes(i) > PAGE_SIZE - 48 &&
548 !current->bio_list)
549 bch_btree_node_write(b, NULL);
cafe5635
KO
550}
551
552/*
553 * Btree in memory cache - allocation/freeing
554 * mca -> memory cache
555 */
556
cafe5635
KO
557#define mca_reserve(c) (((c->root && c->root->level) \
558 ? c->root->level : 1) * 8 + 16)
559#define mca_can_free(c) \
0a63b66d 560 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
cafe5635
KO
561
562static void mca_data_free(struct btree *b)
563{
cb7a583e 564 BUG_ON(b->io_mutex.count != 1);
cafe5635 565
a85e968e 566 bch_btree_keys_free(&b->keys);
cafe5635 567
0a63b66d 568 b->c->btree_cache_used--;
ee811287 569 list_move(&b->list, &b->c->btree_cache_freed);
cafe5635
KO
570}
571
572static void mca_bucket_free(struct btree *b)
573{
574 BUG_ON(btree_node_dirty(b));
575
576 b->key.ptr[0] = 0;
577 hlist_del_init_rcu(&b->hash);
578 list_move(&b->list, &b->c->btree_cache_freeable);
579}
580
581static unsigned btree_order(struct bkey *k)
582{
583 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
584}
585
586static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
587{
a85e968e 588 if (!bch_btree_keys_alloc(&b->keys,
ee811287
KO
589 max_t(unsigned,
590 ilog2(b->c->btree_pages),
591 btree_order(k)),
592 gfp)) {
0a63b66d 593 b->c->btree_cache_used++;
ee811287
KO
594 list_move(&b->list, &b->c->btree_cache);
595 } else {
596 list_move(&b->list, &b->c->btree_cache_freed);
597 }
cafe5635
KO
598}
599
600static struct btree *mca_bucket_alloc(struct cache_set *c,
601 struct bkey *k, gfp_t gfp)
602{
603 struct btree *b = kzalloc(sizeof(struct btree), gfp);
604 if (!b)
605 return NULL;
606
607 init_rwsem(&b->lock);
608 lockdep_set_novalidate_class(&b->lock);
2a285686
KO
609 mutex_init(&b->write_lock);
610 lockdep_set_novalidate_class(&b->write_lock);
cafe5635 611 INIT_LIST_HEAD(&b->list);
57943511 612 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
cafe5635 613 b->c = c;
cb7a583e 614 sema_init(&b->io_mutex, 1);
cafe5635
KO
615
616 mca_data_alloc(b, k, gfp);
617 return b;
618}
619
e8e1d468 620static int mca_reap(struct btree *b, unsigned min_order, bool flush)
cafe5635 621{
e8e1d468
KO
622 struct closure cl;
623
624 closure_init_stack(&cl);
cafe5635
KO
625 lockdep_assert_held(&b->c->bucket_lock);
626
627 if (!down_write_trylock(&b->lock))
628 return -ENOMEM;
629
a85e968e 630 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
e8e1d468 631
a85e968e 632 if (b->keys.page_order < min_order)
cb7a583e
KO
633 goto out_unlock;
634
635 if (!flush) {
636 if (btree_node_dirty(b))
637 goto out_unlock;
638
639 if (down_trylock(&b->io_mutex))
640 goto out_unlock;
641 up(&b->io_mutex);
cafe5635
KO
642 }
643
2a285686 644 mutex_lock(&b->write_lock);
f269af5a 645 if (btree_node_dirty(b))
2a285686
KO
646 __bch_btree_node_write(b, &cl);
647 mutex_unlock(&b->write_lock);
648
649 closure_sync(&cl);
cafe5635 650
e8e1d468 651 /* wait for any in flight btree write */
cb7a583e
KO
652 down(&b->io_mutex);
653 up(&b->io_mutex);
e8e1d468 654
cafe5635 655 return 0;
cb7a583e
KO
656out_unlock:
657 rw_unlock(true, b);
658 return -ENOMEM;
cafe5635
KO
659}
660
7dc19d5a
DC
661static unsigned long bch_mca_scan(struct shrinker *shrink,
662 struct shrink_control *sc)
cafe5635
KO
663{
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
665 struct btree *b, *t;
666 unsigned long i, nr = sc->nr_to_scan;
7dc19d5a 667 unsigned long freed = 0;
cafe5635
KO
668
669 if (c->shrinker_disabled)
7dc19d5a 670 return SHRINK_STOP;
cafe5635 671
0a63b66d 672 if (c->btree_cache_alloc_lock)
7dc19d5a 673 return SHRINK_STOP;
cafe5635
KO
674
675 /* Return -1 if we can't do anything right now */
a698e08c 676 if (sc->gfp_mask & __GFP_IO)
cafe5635
KO
677 mutex_lock(&c->bucket_lock);
678 else if (!mutex_trylock(&c->bucket_lock))
679 return -1;
680
36c9ea98
KO
681 /*
682 * It's _really_ critical that we don't free too many btree nodes - we
683 * have to always leave ourselves a reserve. The reserve is how we
684 * guarantee that allocating memory for a new btree node can always
685 * succeed, so that inserting keys into the btree can always succeed and
686 * IO can always make forward progress:
687 */
cafe5635
KO
688 nr /= c->btree_pages;
689 nr = min_t(unsigned long, nr, mca_can_free(c));
690
691 i = 0;
692 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
7dc19d5a 693 if (freed >= nr)
cafe5635
KO
694 break;
695
696 if (++i > 3 &&
e8e1d468 697 !mca_reap(b, 0, false)) {
cafe5635
KO
698 mca_data_free(b);
699 rw_unlock(true, b);
7dc19d5a 700 freed++;
cafe5635
KO
701 }
702 }
703
0a63b66d 704 for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
b0f32a56
KO
705 if (list_empty(&c->btree_cache))
706 goto out;
707
cafe5635
KO
708 b = list_first_entry(&c->btree_cache, struct btree, list);
709 list_rotate_left(&c->btree_cache);
710
711 if (!b->accessed &&
e8e1d468 712 !mca_reap(b, 0, false)) {
cafe5635
KO
713 mca_bucket_free(b);
714 mca_data_free(b);
715 rw_unlock(true, b);
7dc19d5a 716 freed++;
cafe5635
KO
717 } else
718 b->accessed = 0;
719 }
720out:
cafe5635 721 mutex_unlock(&c->bucket_lock);
7dc19d5a
DC
722 return freed;
723}
724
725static unsigned long bch_mca_count(struct shrinker *shrink,
726 struct shrink_control *sc)
727{
728 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
729
730 if (c->shrinker_disabled)
731 return 0;
732
0a63b66d 733 if (c->btree_cache_alloc_lock)
7dc19d5a
DC
734 return 0;
735
736 return mca_can_free(c) * c->btree_pages;
cafe5635
KO
737}
738
739void bch_btree_cache_free(struct cache_set *c)
740{
741 struct btree *b;
742 struct closure cl;
743 closure_init_stack(&cl);
744
745 if (c->shrink.list.next)
746 unregister_shrinker(&c->shrink);
747
748 mutex_lock(&c->bucket_lock);
749
750#ifdef CONFIG_BCACHE_DEBUG
751 if (c->verify_data)
752 list_move(&c->verify_data->list, &c->btree_cache);
78b77bf8
KO
753
754 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
cafe5635
KO
755#endif
756
757 list_splice(&c->btree_cache_freeable,
758 &c->btree_cache);
759
760 while (!list_empty(&c->btree_cache)) {
761 b = list_first_entry(&c->btree_cache, struct btree, list);
762
763 if (btree_node_dirty(b))
764 btree_complete_write(b, btree_current_write(b));
765 clear_bit(BTREE_NODE_dirty, &b->flags);
766
767 mca_data_free(b);
768 }
769
770 while (!list_empty(&c->btree_cache_freed)) {
771 b = list_first_entry(&c->btree_cache_freed,
772 struct btree, list);
773 list_del(&b->list);
774 cancel_delayed_work_sync(&b->work);
775 kfree(b);
776 }
777
778 mutex_unlock(&c->bucket_lock);
779}
780
781int bch_btree_cache_alloc(struct cache_set *c)
782{
783 unsigned i;
784
cafe5635 785 for (i = 0; i < mca_reserve(c); i++)
72a44517
KO
786 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
787 return -ENOMEM;
cafe5635
KO
788
789 list_splice_init(&c->btree_cache,
790 &c->btree_cache_freeable);
791
792#ifdef CONFIG_BCACHE_DEBUG
793 mutex_init(&c->verify_lock);
794
78b77bf8
KO
795 c->verify_ondisk = (void *)
796 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
797
cafe5635
KO
798 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
799
800 if (c->verify_data &&
a85e968e 801 c->verify_data->keys.set->data)
cafe5635
KO
802 list_del_init(&c->verify_data->list);
803 else
804 c->verify_data = NULL;
805#endif
806
7dc19d5a
DC
807 c->shrink.count_objects = bch_mca_count;
808 c->shrink.scan_objects = bch_mca_scan;
cafe5635
KO
809 c->shrink.seeks = 4;
810 c->shrink.batch = c->btree_pages * 2;
811 register_shrinker(&c->shrink);
812
813 return 0;
814}
815
816/* Btree in memory cache - hash table */
817
818static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
819{
820 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
821}
822
823static struct btree *mca_find(struct cache_set *c, struct bkey *k)
824{
825 struct btree *b;
826
827 rcu_read_lock();
828 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
829 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
830 goto out;
831 b = NULL;
832out:
833 rcu_read_unlock();
834 return b;
835}
836
0a63b66d
KO
837static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
838{
839 struct task_struct *old;
840
841 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
842 if (old && old != current) {
843 if (op)
844 prepare_to_wait(&c->btree_cache_wait, &op->wait,
845 TASK_UNINTERRUPTIBLE);
846 return -EINTR;
847 }
848
849 return 0;
850}
851
852static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
853 struct bkey *k)
cafe5635 854{
e8e1d468 855 struct btree *b;
cafe5635 856
c37511b8
KO
857 trace_bcache_btree_cache_cannibalize(c);
858
0a63b66d
KO
859 if (mca_cannibalize_lock(c, op))
860 return ERR_PTR(-EINTR);
cafe5635 861
e8e1d468
KO
862 list_for_each_entry_reverse(b, &c->btree_cache, list)
863 if (!mca_reap(b, btree_order(k), false))
864 return b;
cafe5635 865
e8e1d468
KO
866 list_for_each_entry_reverse(b, &c->btree_cache, list)
867 if (!mca_reap(b, btree_order(k), true))
868 return b;
cafe5635 869
0a63b66d 870 WARN(1, "btree cache cannibalize failed\n");
e8e1d468 871 return ERR_PTR(-ENOMEM);
cafe5635
KO
872}
873
874/*
875 * We can only have one thread cannibalizing other cached btree nodes at a time,
876 * or we'll deadlock. We use an open coded mutex to ensure that, which a
877 * cannibalize_bucket() will take. This means every time we unlock the root of
878 * the btree, we need to release this lock if we have it held.
879 */
df8e8970 880static void bch_cannibalize_unlock(struct cache_set *c)
cafe5635 881{
0a63b66d
KO
882 if (c->btree_cache_alloc_lock == current) {
883 c->btree_cache_alloc_lock = NULL;
884 wake_up(&c->btree_cache_wait);
cafe5635
KO
885 }
886}
887
0a63b66d
KO
888static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
889 struct bkey *k, int level)
cafe5635
KO
890{
891 struct btree *b;
892
e8e1d468
KO
893 BUG_ON(current->bio_list);
894
cafe5635
KO
895 lockdep_assert_held(&c->bucket_lock);
896
897 if (mca_find(c, k))
898 return NULL;
899
900 /* btree_free() doesn't free memory; it sticks the node on the end of
901 * the list. Check if there's any freed nodes there:
902 */
903 list_for_each_entry(b, &c->btree_cache_freeable, list)
e8e1d468 904 if (!mca_reap(b, btree_order(k), false))
cafe5635
KO
905 goto out;
906
907 /* We never free struct btree itself, just the memory that holds the on
908 * disk node. Check the freed list before allocating a new one:
909 */
910 list_for_each_entry(b, &c->btree_cache_freed, list)
e8e1d468 911 if (!mca_reap(b, 0, false)) {
cafe5635 912 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
a85e968e 913 if (!b->keys.set[0].data)
cafe5635
KO
914 goto err;
915 else
916 goto out;
917 }
918
919 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
920 if (!b)
921 goto err;
922
923 BUG_ON(!down_write_trylock(&b->lock));
a85e968e 924 if (!b->keys.set->data)
cafe5635
KO
925 goto err;
926out:
cb7a583e 927 BUG_ON(b->io_mutex.count != 1);
cafe5635
KO
928
929 bkey_copy(&b->key, k);
930 list_move(&b->list, &c->btree_cache);
931 hlist_del_init_rcu(&b->hash);
932 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
933
934 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
d6fd3b11 935 b->parent = (void *) ~0UL;
a85e968e
KO
936 b->flags = 0;
937 b->written = 0;
938 b->level = level;
cafe5635 939
65d45231 940 if (!b->level)
a85e968e
KO
941 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
942 &b->c->expensive_debug_checks);
65d45231 943 else
a85e968e
KO
944 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
945 &b->c->expensive_debug_checks);
cafe5635
KO
946
947 return b;
948err:
949 if (b)
950 rw_unlock(true, b);
951
0a63b66d 952 b = mca_cannibalize(c, op, k);
cafe5635
KO
953 if (!IS_ERR(b))
954 goto out;
955
956 return b;
957}
958
959/**
960 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
961 * in from disk if necessary.
962 *
b54d6934 963 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
cafe5635
KO
964 *
965 * The btree node will have either a read or a write lock held, depending on
966 * level and op->lock.
967 */
0a63b66d 968struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
2452cc89
SP
969 struct bkey *k, int level, bool write,
970 struct btree *parent)
cafe5635
KO
971{
972 int i = 0;
cafe5635
KO
973 struct btree *b;
974
975 BUG_ON(level < 0);
976retry:
977 b = mca_find(c, k);
978
979 if (!b) {
57943511
KO
980 if (current->bio_list)
981 return ERR_PTR(-EAGAIN);
982
cafe5635 983 mutex_lock(&c->bucket_lock);
0a63b66d 984 b = mca_alloc(c, op, k, level);
cafe5635
KO
985 mutex_unlock(&c->bucket_lock);
986
987 if (!b)
988 goto retry;
989 if (IS_ERR(b))
990 return b;
991
57943511 992 bch_btree_node_read(b);
cafe5635
KO
993
994 if (!write)
995 downgrade_write(&b->lock);
996 } else {
997 rw_lock(write, b, level);
998 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
999 rw_unlock(write, b);
1000 goto retry;
1001 }
1002 BUG_ON(b->level != level);
1003 }
1004
2452cc89 1005 b->parent = parent;
cafe5635
KO
1006 b->accessed = 1;
1007
a85e968e
KO
1008 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1009 prefetch(b->keys.set[i].tree);
1010 prefetch(b->keys.set[i].data);
cafe5635
KO
1011 }
1012
a85e968e
KO
1013 for (; i <= b->keys.nsets; i++)
1014 prefetch(b->keys.set[i].data);
cafe5635 1015
57943511 1016 if (btree_node_io_error(b)) {
cafe5635 1017 rw_unlock(write, b);
57943511
KO
1018 return ERR_PTR(-EIO);
1019 }
1020
1021 BUG_ON(!b->written);
cafe5635
KO
1022
1023 return b;
1024}
1025
2452cc89 1026static void btree_node_prefetch(struct btree *parent, struct bkey *k)
cafe5635
KO
1027{
1028 struct btree *b;
1029
2452cc89
SP
1030 mutex_lock(&parent->c->bucket_lock);
1031 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1032 mutex_unlock(&parent->c->bucket_lock);
cafe5635
KO
1033
1034 if (!IS_ERR_OR_NULL(b)) {
2452cc89 1035 b->parent = parent;
57943511 1036 bch_btree_node_read(b);
cafe5635
KO
1037 rw_unlock(true, b);
1038 }
1039}
1040
1041/* Btree alloc */
1042
e8e1d468 1043static void btree_node_free(struct btree *b)
cafe5635 1044{
c37511b8
KO
1045 trace_bcache_btree_node_free(b);
1046
cafe5635 1047 BUG_ON(b == b->c->root);
cafe5635 1048
2a285686
KO
1049 mutex_lock(&b->write_lock);
1050
cafe5635
KO
1051 if (btree_node_dirty(b))
1052 btree_complete_write(b, btree_current_write(b));
1053 clear_bit(BTREE_NODE_dirty, &b->flags);
1054
2a285686
KO
1055 mutex_unlock(&b->write_lock);
1056
cafe5635
KO
1057 cancel_delayed_work(&b->work);
1058
1059 mutex_lock(&b->c->bucket_lock);
cafe5635
KO
1060 bch_bucket_free(b->c, &b->key);
1061 mca_bucket_free(b);
1062 mutex_unlock(&b->c->bucket_lock);
1063}
1064
c5aa4a31 1065struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
2452cc89
SP
1066 int level, bool wait,
1067 struct btree *parent)
cafe5635
KO
1068{
1069 BKEY_PADDED(key) k;
1070 struct btree *b = ERR_PTR(-EAGAIN);
1071
1072 mutex_lock(&c->bucket_lock);
1073retry:
c5aa4a31 1074 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
cafe5635
KO
1075 goto err;
1076
3a3b6a4e 1077 bkey_put(c, &k.key);
cafe5635
KO
1078 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1079
0a63b66d 1080 b = mca_alloc(c, op, &k.key, level);
cafe5635
KO
1081 if (IS_ERR(b))
1082 goto err_free;
1083
1084 if (!b) {
b1a67b0f
KO
1085 cache_bug(c,
1086 "Tried to allocate bucket that was in btree cache");
cafe5635
KO
1087 goto retry;
1088 }
1089
cafe5635 1090 b->accessed = 1;
2452cc89 1091 b->parent = parent;
a85e968e 1092 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
cafe5635
KO
1093
1094 mutex_unlock(&c->bucket_lock);
c37511b8
KO
1095
1096 trace_bcache_btree_node_alloc(b);
cafe5635
KO
1097 return b;
1098err_free:
1099 bch_bucket_free(c, &k.key);
cafe5635
KO
1100err:
1101 mutex_unlock(&c->bucket_lock);
c37511b8 1102
913dc33f 1103 trace_bcache_btree_node_alloc_fail(c);
cafe5635
KO
1104 return b;
1105}
1106
c5aa4a31 1107static struct btree *bch_btree_node_alloc(struct cache_set *c,
2452cc89
SP
1108 struct btree_op *op, int level,
1109 struct btree *parent)
c5aa4a31 1110{
2452cc89 1111 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
c5aa4a31
SP
1112}
1113
0a63b66d
KO
1114static struct btree *btree_node_alloc_replacement(struct btree *b,
1115 struct btree_op *op)
cafe5635 1116{
2452cc89 1117 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
67539e85 1118 if (!IS_ERR_OR_NULL(n)) {
2a285686 1119 mutex_lock(&n->write_lock);
89ebb4a2 1120 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
67539e85 1121 bkey_copy_key(&n->key, &b->key);
2a285686 1122 mutex_unlock(&n->write_lock);
67539e85 1123 }
cafe5635
KO
1124
1125 return n;
1126}
1127
8835c123
KO
1128static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1129{
1130 unsigned i;
1131
05335cff
KO
1132 mutex_lock(&b->c->bucket_lock);
1133
1134 atomic_inc(&b->c->prio_blocked);
1135
8835c123
KO
1136 bkey_copy(k, &b->key);
1137 bkey_copy_key(k, &ZERO_KEY);
1138
05335cff
KO
1139 for (i = 0; i < KEY_PTRS(k); i++)
1140 SET_PTR_GEN(k, i,
1141 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1142 PTR_BUCKET(b->c, &b->key, i)));
8835c123 1143
05335cff 1144 mutex_unlock(&b->c->bucket_lock);
8835c123
KO
1145}
1146
78365411
KO
1147static int btree_check_reserve(struct btree *b, struct btree_op *op)
1148{
1149 struct cache_set *c = b->c;
1150 struct cache *ca;
0a63b66d 1151 unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
78365411
KO
1152
1153 mutex_lock(&c->bucket_lock);
1154
1155 for_each_cache(ca, c, i)
1156 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1157 if (op)
0a63b66d 1158 prepare_to_wait(&c->btree_cache_wait, &op->wait,
78365411 1159 TASK_UNINTERRUPTIBLE);
0a63b66d
KO
1160 mutex_unlock(&c->bucket_lock);
1161 return -EINTR;
78365411
KO
1162 }
1163
1164 mutex_unlock(&c->bucket_lock);
0a63b66d
KO
1165
1166 return mca_cannibalize_lock(b->c, op);
78365411
KO
1167}
1168
cafe5635
KO
1169/* Garbage collection */
1170
487dded8
KO
1171static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1172 struct bkey *k)
cafe5635
KO
1173{
1174 uint8_t stale = 0;
1175 unsigned i;
1176 struct bucket *g;
1177
1178 /*
1179 * ptr_invalid() can't return true for the keys that mark btree nodes as
1180 * freed, but since ptr_bad() returns true we'll never actually use them
 1181 * for anything and thus we don't want to mark their pointers here
1182 */
1183 if (!bkey_cmp(k, &ZERO_KEY))
1184 return stale;
1185
1186 for (i = 0; i < KEY_PTRS(k); i++) {
1187 if (!ptr_available(c, k, i))
1188 continue;
1189
1190 g = PTR_BUCKET(c, k, i);
1191
3a2fd9d5
KO
1192 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1193 g->last_gc = PTR_GEN(k, i);
cafe5635
KO
1194
1195 if (ptr_stale(c, k, i)) {
1196 stale = max(stale, ptr_stale(c, k, i));
1197 continue;
1198 }
1199
1200 cache_bug_on(GC_MARK(g) &&
1201 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1202 c, "inconsistent ptrs: mark = %llu, level = %i",
1203 GC_MARK(g), level);
1204
1205 if (level)
1206 SET_GC_MARK(g, GC_MARK_METADATA);
1207 else if (KEY_DIRTY(k))
1208 SET_GC_MARK(g, GC_MARK_DIRTY);
4fe6a816
KO
1209 else if (!GC_MARK(g))
1210 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
cafe5635
KO
1211
1212 /* guard against overflow */
1213 SET_GC_SECTORS_USED(g, min_t(unsigned,
1214 GC_SECTORS_USED(g) + KEY_SIZE(k),
94717447 1215 MAX_GC_SECTORS_USED));
cafe5635
KO
1216
1217 BUG_ON(!GC_SECTORS_USED(g));
1218 }
1219
1220 return stale;
1221}
1222
1223#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1224
487dded8
KO
1225void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1226{
1227 unsigned i;
1228
1229 for (i = 0; i < KEY_PTRS(k); i++)
1230 if (ptr_available(c, k, i) &&
1231 !ptr_stale(c, k, i)) {
1232 struct bucket *b = PTR_BUCKET(c, k, i);
1233
1234 b->gen = PTR_GEN(k, i);
1235
1236 if (level && bkey_cmp(k, &ZERO_KEY))
1237 b->prio = BTREE_PRIO;
1238 else if (!level && b->prio == BTREE_PRIO)
1239 b->prio = INITIAL_PRIO;
1240 }
1241
1242 __bch_btree_mark_key(c, level, k);
1243}
1244
a1f0358b 1245static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
cafe5635
KO
1246{
1247 uint8_t stale = 0;
a1f0358b 1248 unsigned keys = 0, good_keys = 0;
cafe5635
KO
1249 struct bkey *k;
1250 struct btree_iter iter;
1251 struct bset_tree *t;
1252
1253 gc->nodes++;
1254
c052dd9a 1255 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
cafe5635 1256 stale = max(stale, btree_mark_key(b, k));
a1f0358b 1257 keys++;
cafe5635 1258
a85e968e 1259 if (bch_ptr_bad(&b->keys, k))
cafe5635
KO
1260 continue;
1261
cafe5635
KO
1262 gc->key_bytes += bkey_u64s(k);
1263 gc->nkeys++;
a1f0358b 1264 good_keys++;
cafe5635
KO
1265
1266 gc->data += KEY_SIZE(k);
cafe5635
KO
1267 }
1268
a85e968e 1269 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
cafe5635 1270 btree_bug_on(t->size &&
a85e968e 1271 bset_written(&b->keys, t) &&
cafe5635
KO
1272 bkey_cmp(&b->key, &t->end) < 0,
1273 b, "found short btree key in gc");
1274
a1f0358b
KO
1275 if (b->c->gc_always_rewrite)
1276 return true;
cafe5635 1277
a1f0358b
KO
1278 if (stale > 10)
1279 return true;
cafe5635 1280
a1f0358b
KO
1281 if ((keys - good_keys) * 2 > keys)
1282 return true;
cafe5635 1283
a1f0358b 1284 return false;
cafe5635
KO
1285}
1286
a1f0358b 1287#define GC_MERGE_NODES 4U
cafe5635
KO
1288
1289struct gc_merge_info {
1290 struct btree *b;
cafe5635
KO
1291 unsigned keys;
1292};
1293
a1f0358b
KO
1294static int bch_btree_insert_node(struct btree *, struct btree_op *,
1295 struct keylist *, atomic_t *, struct bkey *);
1296
1297static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
0a63b66d 1298 struct gc_stat *gc, struct gc_merge_info *r)
cafe5635 1299{
a1f0358b
KO
1300 unsigned i, nodes = 0, keys = 0, blocks;
1301 struct btree *new_nodes[GC_MERGE_NODES];
0a63b66d 1302 struct keylist keylist;
b54d6934 1303 struct closure cl;
a1f0358b 1304 struct bkey *k;
b54d6934 1305
0a63b66d
KO
1306 bch_keylist_init(&keylist);
1307
1308 if (btree_check_reserve(b, NULL))
1309 return 0;
1310
a1f0358b 1311 memset(new_nodes, 0, sizeof(new_nodes));
b54d6934 1312 closure_init_stack(&cl);
cafe5635 1313
a1f0358b 1314 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
cafe5635
KO
1315 keys += r[nodes++].keys;
1316
1317 blocks = btree_default_blocks(b->c) * 2 / 3;
1318
1319 if (nodes < 2 ||
a85e968e 1320 __set_blocks(b->keys.set[0].data, keys,
ee811287 1321 block_bytes(b->c)) > blocks * (nodes - 1))
a1f0358b 1322 return 0;
cafe5635 1323
a1f0358b 1324 for (i = 0; i < nodes; i++) {
0a63b66d 1325 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
a1f0358b
KO
1326 if (IS_ERR_OR_NULL(new_nodes[i]))
1327 goto out_nocoalesce;
cafe5635
KO
1328 }
1329
0a63b66d
KO
1330 /*
1331 * We have to check the reserve here, after we've allocated our new
1332 * nodes, to make sure the insert below will succeed - we also check
1333 * before as an optimization to potentially avoid a bunch of expensive
1334 * allocs/sorts
1335 */
1336 if (btree_check_reserve(b, NULL))
1337 goto out_nocoalesce;
1338
2a285686
KO
1339 for (i = 0; i < nodes; i++)
1340 mutex_lock(&new_nodes[i]->write_lock);
1341
cafe5635 1342 for (i = nodes - 1; i > 0; --i) {
ee811287
KO
1343 struct bset *n1 = btree_bset_first(new_nodes[i]);
1344 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
cafe5635
KO
1345 struct bkey *k, *last = NULL;
1346
1347 keys = 0;
1348
a1f0358b
KO
1349 if (i > 1) {
1350 for (k = n2->start;
fafff81c 1351 k < bset_bkey_last(n2);
a1f0358b
KO
1352 k = bkey_next(k)) {
1353 if (__set_blocks(n1, n1->keys + keys +
ee811287
KO
1354 bkey_u64s(k),
1355 block_bytes(b->c)) > blocks)
a1f0358b
KO
1356 break;
1357
1358 last = k;
1359 keys += bkey_u64s(k);
1360 }
1361 } else {
cafe5635
KO
1362 /*
1363 * Last node we're not getting rid of - we're getting
1364 * rid of the node at r[0]. Have to try and fit all of
1365 * the remaining keys into this node; we can't ensure
1366 * they will always fit due to rounding and variable
1367 * length keys (shouldn't be possible in practice,
1368 * though)
1369 */
a1f0358b 1370 if (__set_blocks(n1, n1->keys + n2->keys,
ee811287
KO
1371 block_bytes(b->c)) >
1372 btree_blocks(new_nodes[i]))
a1f0358b 1373 goto out_nocoalesce;
cafe5635
KO
1374
1375 keys = n2->keys;
a1f0358b 1376 /* Take the key of the node we're getting rid of */
cafe5635 1377 last = &r->b->key;
a1f0358b 1378 }
cafe5635 1379
ee811287
KO
1380 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1381 btree_blocks(new_nodes[i]));
cafe5635 1382
a1f0358b
KO
1383 if (last)
1384 bkey_copy_key(&new_nodes[i]->key, last);
cafe5635 1385
fafff81c 1386 memcpy(bset_bkey_last(n1),
cafe5635 1387 n2->start,
fafff81c 1388 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
cafe5635
KO
1389
1390 n1->keys += keys;
a1f0358b 1391 r[i].keys = n1->keys;
cafe5635
KO
1392
1393 memmove(n2->start,
fafff81c
KO
1394 bset_bkey_idx(n2, keys),
1395 (void *) bset_bkey_last(n2) -
1396 (void *) bset_bkey_idx(n2, keys));
cafe5635
KO
1397
1398 n2->keys -= keys;
1399
0a63b66d 1400 if (__bch_keylist_realloc(&keylist,
085d2a3d 1401 bkey_u64s(&new_nodes[i]->key)))
a1f0358b
KO
1402 goto out_nocoalesce;
1403
1404 bch_btree_node_write(new_nodes[i], &cl);
0a63b66d 1405 bch_keylist_add(&keylist, &new_nodes[i]->key);
cafe5635
KO
1406 }
1407
2a285686
KO
1408 for (i = 0; i < nodes; i++)
1409 mutex_unlock(&new_nodes[i]->write_lock);
1410
05335cff
KO
1411 closure_sync(&cl);
1412
1413 /* We emptied out this node */
1414 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1415 btree_node_free(new_nodes[0]);
1416 rw_unlock(true, new_nodes[0]);
400ffaa2 1417 new_nodes[0] = NULL;
05335cff 1418
a1f0358b 1419 for (i = 0; i < nodes; i++) {
0a63b66d 1420 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
a1f0358b 1421 goto out_nocoalesce;
cafe5635 1422
0a63b66d
KO
1423 make_btree_freeing_key(r[i].b, keylist.top);
1424 bch_keylist_push(&keylist);
a1f0358b 1425 }
cafe5635 1426
0a63b66d
KO
1427 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1428 BUG_ON(!bch_keylist_empty(&keylist));
a1f0358b
KO
1429
1430 for (i = 0; i < nodes; i++) {
1431 btree_node_free(r[i].b);
1432 rw_unlock(true, r[i].b);
1433
1434 r[i].b = new_nodes[i];
1435 }
1436
a1f0358b
KO
1437 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1438 r[nodes - 1].b = ERR_PTR(-EINTR);
1439
1440 trace_bcache_btree_gc_coalesce(nodes);
cafe5635 1441 gc->nodes--;
cafe5635 1442
0a63b66d
KO
1443 bch_keylist_free(&keylist);
1444
a1f0358b
KO
1445 /* Invalidated our iterator */
1446 return -EINTR;
1447
1448out_nocoalesce:
1449 closure_sync(&cl);
0a63b66d 1450 bch_keylist_free(&keylist);
a1f0358b 1451
0a63b66d 1452 while ((k = bch_keylist_pop(&keylist)))
a1f0358b
KO
1453 if (!bkey_cmp(k, &ZERO_KEY))
1454 atomic_dec(&b->c->prio_blocked);
1455
1456 for (i = 0; i < nodes; i++)
1457 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1458 btree_node_free(new_nodes[i]);
1459 rw_unlock(true, new_nodes[i]);
1460 }
1461 return 0;
cafe5635
KO
1462}
1463
0a63b66d
KO
1464static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1465 struct btree *replace)
1466{
1467 struct keylist keys;
1468 struct btree *n;
1469
1470 if (btree_check_reserve(b, NULL))
1471 return 0;
1472
1473 n = btree_node_alloc_replacement(replace, NULL);
1474
1475 /* recheck reserve after allocating replacement node */
1476 if (btree_check_reserve(b, NULL)) {
1477 btree_node_free(n);
1478 rw_unlock(true, n);
1479 return 0;
1480 }
1481
1482 bch_btree_node_write_sync(n);
1483
1484 bch_keylist_init(&keys);
1485 bch_keylist_add(&keys, &n->key);
1486
1487 make_btree_freeing_key(replace, keys.top);
1488 bch_keylist_push(&keys);
1489
1490 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1491 BUG_ON(!bch_keylist_empty(&keys));
1492
1493 btree_node_free(replace);
1494 rw_unlock(true, n);
1495
1496 /* Invalidated our iterator */
1497 return -EINTR;
1498}
1499
a1f0358b 1500static unsigned btree_gc_count_keys(struct btree *b)
cafe5635 1501{
a1f0358b
KO
1502 struct bkey *k;
1503 struct btree_iter iter;
1504 unsigned ret = 0;
cafe5635 1505
c052dd9a 1506 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
a1f0358b
KO
1507 ret += bkey_u64s(k);
1508
1509 return ret;
1510}
cafe5635 1511
a1f0358b
KO
1512static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1513 struct closure *writes, struct gc_stat *gc)
1514{
a1f0358b
KO
1515 int ret = 0;
1516 bool should_rewrite;
a1f0358b 1517 struct bkey *k;
a1f0358b 1518 struct btree_iter iter;
cafe5635 1519 struct gc_merge_info r[GC_MERGE_NODES];
2a285686 1520 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
cafe5635 1521
c052dd9a 1522 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
cafe5635 1523
2a285686
KO
1524 for (i = r; i < r + ARRAY_SIZE(r); i++)
1525 i->b = ERR_PTR(-EINTR);
cafe5635 1526
a1f0358b 1527 while (1) {
a85e968e 1528 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
a1f0358b 1529 if (k) {
0a63b66d 1530 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
2452cc89 1531 true, b);
a1f0358b
KO
1532 if (IS_ERR(r->b)) {
1533 ret = PTR_ERR(r->b);
1534 break;
1535 }
1536
1537 r->keys = btree_gc_count_keys(r->b);
1538
0a63b66d 1539 ret = btree_gc_coalesce(b, op, gc, r);
a1f0358b
KO
1540 if (ret)
1541 break;
cafe5635
KO
1542 }
1543
a1f0358b
KO
1544 if (!last->b)
1545 break;
cafe5635 1546
a1f0358b
KO
1547 if (!IS_ERR(last->b)) {
1548 should_rewrite = btree_gc_mark_node(last->b, gc);
0a63b66d
KO
1549 if (should_rewrite) {
1550 ret = btree_gc_rewrite_node(b, op, last->b);
1551 if (ret)
a1f0358b 1552 break;
a1f0358b
KO
1553 }
1554
1555 if (last->b->level) {
1556 ret = btree_gc_recurse(last->b, op, writes, gc);
1557 if (ret)
1558 break;
1559 }
cafe5635 1560
a1f0358b
KO
1561 bkey_copy_key(&b->c->gc_done, &last->b->key);
1562
1563 /*
1564 * Must flush leaf nodes before gc ends, since replace
1565 * operations aren't journalled
1566 */
2a285686 1567 mutex_lock(&last->b->write_lock);
a1f0358b
KO
1568 if (btree_node_dirty(last->b))
1569 bch_btree_node_write(last->b, writes);
2a285686 1570 mutex_unlock(&last->b->write_lock);
a1f0358b
KO
1571 rw_unlock(true, last->b);
1572 }
1573
1574 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1575 r->b = NULL;
cafe5635 1576
cafe5635
KO
1577 if (need_resched()) {
1578 ret = -EAGAIN;
1579 break;
1580 }
cafe5635
KO
1581 }
1582
2a285686
KO
1583 for (i = r; i < r + ARRAY_SIZE(r); i++)
1584 if (!IS_ERR_OR_NULL(i->b)) {
1585 mutex_lock(&i->b->write_lock);
1586 if (btree_node_dirty(i->b))
1587 bch_btree_node_write(i->b, writes);
1588 mutex_unlock(&i->b->write_lock);
1589 rw_unlock(true, i->b);
a1f0358b 1590 }
cafe5635 1591
cafe5635
KO
1592 return ret;
1593}
1594
1595static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1596 struct closure *writes, struct gc_stat *gc)
1597{
1598 struct btree *n = NULL;
a1f0358b
KO
1599 int ret = 0;
1600 bool should_rewrite;
cafe5635 1601
a1f0358b
KO
1602 should_rewrite = btree_gc_mark_node(b, gc);
1603 if (should_rewrite) {
0a63b66d 1604 n = btree_node_alloc_replacement(b, NULL);
cafe5635 1605
a1f0358b
KO
1606 if (!IS_ERR_OR_NULL(n)) {
1607 bch_btree_node_write_sync(n);
2a285686 1608
a1f0358b
KO
1609 bch_btree_set_root(n);
1610 btree_node_free(b);
1611 rw_unlock(true, n);
cafe5635 1612
a1f0358b
KO
1613 return -EINTR;
1614 }
1615 }
cafe5635 1616
487dded8
KO
1617 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1618
a1f0358b
KO
1619 if (b->level) {
1620 ret = btree_gc_recurse(b, op, writes, gc);
1621 if (ret)
1622 return ret;
cafe5635
KO
1623 }
1624
a1f0358b
KO
1625 bkey_copy_key(&b->c->gc_done, &b->key);
1626
cafe5635
KO
1627 return ret;
1628}
1629
1630static void btree_gc_start(struct cache_set *c)
1631{
1632 struct cache *ca;
1633 struct bucket *b;
cafe5635
KO
1634 unsigned i;
1635
1636 if (!c->gc_mark_valid)
1637 return;
1638
1639 mutex_lock(&c->bucket_lock);
1640
1641 c->gc_mark_valid = 0;
1642 c->gc_done = ZERO_KEY;
1643
1644 for_each_cache(ca, c, i)
1645 for_each_bucket(b, ca) {
3a2fd9d5 1646 b->last_gc = b->gen;
29ebf465 1647 if (!atomic_read(&b->pin)) {
4fe6a816 1648 SET_GC_MARK(b, 0);
29ebf465
KO
1649 SET_GC_SECTORS_USED(b, 0);
1650 }
cafe5635
KO
1651 }
1652
cafe5635
KO
1653 mutex_unlock(&c->bucket_lock);
1654}
1655
2531d9ee 1656static size_t bch_btree_gc_finish(struct cache_set *c)
cafe5635
KO
1657{
1658 size_t available = 0;
1659 struct bucket *b;
1660 struct cache *ca;
cafe5635
KO
1661 unsigned i;
1662
1663 mutex_lock(&c->bucket_lock);
1664
1665 set_gc_sectors(c);
1666 c->gc_mark_valid = 1;
1667 c->need_gc = 0;
1668
cafe5635
KO
1669 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1670 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1671 GC_MARK_METADATA);
1672
bf0a628a
NS
1673 /* don't reclaim buckets to which writeback keys point */
1674 rcu_read_lock();
1675 for (i = 0; i < c->nr_uuids; i++) {
1676 struct bcache_device *d = c->devices[i];
1677 struct cached_dev *dc;
1678 struct keybuf_key *w, *n;
1679 unsigned j;
1680
1681 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1682 continue;
1683 dc = container_of(d, struct cached_dev, disk);
1684
1685 spin_lock(&dc->writeback_keys.lock);
1686 rbtree_postorder_for_each_entry_safe(w, n,
1687 &dc->writeback_keys.keys, node)
1688 for (j = 0; j < KEY_PTRS(&w->key); j++)
1689 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1690 GC_MARK_DIRTY);
1691 spin_unlock(&dc->writeback_keys.lock);
1692 }
1693 rcu_read_unlock();
1694
cafe5635
KO
1695 for_each_cache(ca, c, i) {
1696 uint64_t *i;
1697
1698 ca->invalidate_needs_gc = 0;
1699
1700 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1701 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1702
1703 for (i = ca->prio_buckets;
1704 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1705 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1706
1707 for_each_bucket(b, ca) {
cafe5635
KO
1708 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1709
4fe6a816
KO
1710 if (atomic_read(&b->pin))
1711 continue;
1712
1713 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1714
1715 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
cafe5635 1716 available++;
cafe5635
KO
1717 }
1718 }
1719
cafe5635
KO
1720 mutex_unlock(&c->bucket_lock);
1721 return available;
1722}
1723
72a44517 1724static void bch_btree_gc(struct cache_set *c)
cafe5635 1725{
cafe5635
KO
1726 int ret;
1727 unsigned long available;
1728 struct gc_stat stats;
1729 struct closure writes;
1730 struct btree_op op;
cafe5635 1731 uint64_t start_time = local_clock();
57943511 1732
c37511b8 1733 trace_bcache_gc_start(c);
cafe5635
KO
1734
1735 memset(&stats, 0, sizeof(struct gc_stat));
1736 closure_init_stack(&writes);
b54d6934 1737 bch_btree_op_init(&op, SHRT_MAX);
cafe5635
KO
1738
1739 btree_gc_start(c);
1740
a1f0358b
KO
1741 do {
1742 ret = btree_root(gc_root, c, &op, &writes, &stats);
1743 closure_sync(&writes);
cafe5635 1744
a1f0358b
KO
1745 if (ret && ret != -EAGAIN)
1746 pr_warn("gc failed!");
1747 } while (ret);
cafe5635
KO
1748
1749 available = bch_btree_gc_finish(c);
57943511
KO
1750 wake_up_allocators(c);
1751
169ef1cf 1752 bch_time_stats_update(&c->btree_gc_time, start_time);
cafe5635
KO
1753
1754 stats.key_bytes *= sizeof(uint64_t);
cafe5635
KO
1755 stats.data <<= 9;
1756 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1757 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
cafe5635 1758
c37511b8 1759 trace_bcache_gc_end(c);
cafe5635 1760
72a44517
KO
1761 bch_moving_gc(c);
1762}
1763
1764static int bch_gc_thread(void *arg)
1765{
1766 struct cache_set *c = arg;
a1f0358b
KO
1767 struct cache *ca;
1768 unsigned i;
72a44517
KO
1769
1770 while (1) {
a1f0358b 1771again:
72a44517
KO
1772 bch_btree_gc(c);
1773
1774 set_current_state(TASK_INTERRUPTIBLE);
1775 if (kthread_should_stop())
1776 break;
1777
a1f0358b
KO
1778 mutex_lock(&c->bucket_lock);
1779
1780 for_each_cache(ca, c, i)
1781 if (ca->invalidate_needs_gc) {
1782 mutex_unlock(&c->bucket_lock);
1783 set_current_state(TASK_RUNNING);
1784 goto again;
1785 }
1786
1787 mutex_unlock(&c->bucket_lock);
1788
72a44517
KO
1789 try_to_freeze();
1790 schedule();
1791 }
1792
1793 return 0;
cafe5635
KO
1794}
1795
72a44517 1796int bch_gc_thread_start(struct cache_set *c)
cafe5635 1797{
72a44517
KO
1798 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1799 if (IS_ERR(c->gc_thread))
1800 return PTR_ERR(c->gc_thread);
1801
1802 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1803 return 0;
cafe5635
KO
1804}
1805
1806/* Initial partial gc */
1807
487dded8 1808static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
cafe5635 1809{
50310164 1810 int ret = 0;
50310164 1811 struct bkey *k, *p = NULL;
cafe5635
KO
1812 struct btree_iter iter;
1813
487dded8
KO
1814 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1815 bch_initial_mark_key(b->c, b->level, k);
cafe5635 1816
487dded8 1817 bch_initial_mark_key(b->c, b->level + 1, &b->key);
cafe5635
KO
1818
1819 if (b->level) {
c052dd9a 1820 bch_btree_iter_init(&b->keys, &iter, NULL);
cafe5635 1821
50310164 1822 do {
a85e968e
KO
1823 k = bch_btree_iter_next_filter(&iter, &b->keys,
1824 bch_ptr_bad);
50310164 1825 if (k)
2452cc89 1826 btree_node_prefetch(b, k);
cafe5635 1827
50310164 1828 if (p)
487dded8 1829 ret = btree(check_recurse, p, b, op);
cafe5635 1830
50310164
KO
1831 p = k;
1832 } while (p && !ret);
cafe5635
KO
1833 }
1834
487dded8 1835 return ret;
cafe5635
KO
1836}
1837
c18536a7 1838int bch_btree_check(struct cache_set *c)
cafe5635 1839{
c18536a7 1840 struct btree_op op;
cafe5635 1841
b54d6934 1842 bch_btree_op_init(&op, SHRT_MAX);
cafe5635 1843
487dded8 1844 return btree_root(check_recurse, c, &op);
cafe5635
KO
1845}
1846
2531d9ee
KO
1847void bch_initial_gc_finish(struct cache_set *c)
1848{
1849 struct cache *ca;
1850 struct bucket *b;
1851 unsigned i;
1852
1853 bch_btree_gc_finish(c);
1854
1855 mutex_lock(&c->bucket_lock);
1856
1857 /*
1858 * We need to put some unused buckets directly on the prio freelist in
1859 * order to get the allocator thread started - it needs freed buckets in
1860 * order to rewrite the prios and gens, and it needs to rewrite prios
1861 * and gens in order to free buckets.
1862 *
1863 * This is only safe for buckets that have no live data in them, which
1864 * there should always be some of.
1865 */
1866 for_each_cache(ca, c, i) {
1867 for_each_bucket(b, ca) {
1868 if (fifo_full(&ca->free[RESERVE_PRIO]))
1869 break;
1870
1871 if (bch_can_invalidate_bucket(ca, b) &&
1872 !GC_MARK(b)) {
1873 __bch_invalidate_one_bucket(ca, b);
1874 fifo_push(&ca->free[RESERVE_PRIO],
1875 b - ca->buckets);
1876 }
1877 }
1878 }
1879
1880 mutex_unlock(&c->bucket_lock);
1881}
1882
cafe5635
KO
1883/* Btree insertion */
1884
829a60b9
KO
1885static bool btree_insert_key(struct btree *b, struct bkey *k,
1886 struct bkey *replace_key)
cafe5635 1887{
829a60b9 1888 unsigned status;
cafe5635
KO
1889
1890 BUG_ON(bkey_cmp(k, &b->key) > 0);
1fa8455d 1891
829a60b9
KO
1892 status = bch_btree_insert_key(&b->keys, k, replace_key);
1893 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1894 bch_check_keys(&b->keys, "%u for %s", status,
1895 replace_key ? "replace" : "insert");
cafe5635 1896
829a60b9
KO
1897 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1898 status);
1899 return true;
1900 } else
1901 return false;
cafe5635
KO
1902}
1903
59158fde
KO
1904static size_t insert_u64s_remaining(struct btree *b)
1905{
3572324a 1906 long ret = bch_btree_keys_u64s_remaining(&b->keys);
59158fde
KO
1907
1908 /*
1909 * Might land in the middle of an existing extent and have to split it
1910 */
1911 if (b->keys.ops->is_extents)
1912 ret -= KEY_MAX_U64S;
1913
1914 return max(ret, 0L);
1915}
1916
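/*
 * Insert as many keys from @insert_keys as fit in this node.  A key
 * that straddles the end of the node is split: the portion covered by
 * this node is inserted via a temporary copy and the remainder stays
 * on the keylist for the next node.  Returns true if anything was
 * inserted.
 */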
26c949f8 1917static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1b207d80
KO
1918 struct keylist *insert_keys,
1919 struct bkey *replace_key)
cafe5635
KO
1920{
1921 bool ret = false;
dc9d98d6 1922 int oldsize = bch_count_data(&b->keys);
cafe5635 1923
26c949f8 1924 while (!bch_keylist_empty(insert_keys)) {
c2f95ae2 1925 struct bkey *k = insert_keys->keys;
26c949f8 1926
59158fde 1927 if (bkey_u64s(k) > insert_u64s_remaining(b))
403b6cde
KO
1928 break;
1929
1930 if (bkey_cmp(k, &b->key) <= 0) {
3a3b6a4e
KO
1931 if (!b->level)
1932 bkey_put(b->c, k);
26c949f8 1933
829a60b9 1934 ret |= btree_insert_key(b, k, replace_key);
26c949f8
KO
1935 bch_keylist_pop_front(insert_keys);
1936 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
26c949f8 1937 BKEY_PADDED(key) temp;
c2f95ae2 1938 bkey_copy(&temp.key, insert_keys->keys);
26c949f8
KO
1939
1940 bch_cut_back(&b->key, &temp.key);
c2f95ae2 1941 bch_cut_front(&b->key, insert_keys->keys);
26c949f8 1942
829a60b9 1943 ret |= btree_insert_key(b, &temp.key, replace_key);
26c949f8
KO
1944 break;
1945 } else {
1946 break;
1947 }
cafe5635
KO
1948 }
1949
829a60b9
KO
1950 if (!ret)
1951 op->insert_collision = true;
1952
403b6cde
KO
1953 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1954
dc9d98d6 1955 BUG_ON(bch_count_data(&b->keys) < oldsize);
cafe5635
KO
1956 return ret;
1957}
1958
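/*
 * Rewrite (and possibly split) a node that an insert no longer fits
 * into.  A replacement node n1 is always allocated; if it would end up
 * more than ~4/5 full we also allocate n2, leaving roughly the first
 * 3/5 of the keys in n1 and moving the rest to n2.  When the node
 * being split is the root, n3 is allocated to become the new root.
 * The new nodes' keys are handed to the parent via parent_keys.
 */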
26c949f8
KO
1959static int btree_split(struct btree *b, struct btree_op *op,
1960 struct keylist *insert_keys,
1b207d80 1961 struct bkey *replace_key)
cafe5635 1962{
d6fd3b11 1963 bool split;
cafe5635
KO
1964 struct btree *n1, *n2 = NULL, *n3 = NULL;
1965 uint64_t start_time = local_clock();
b54d6934 1966 struct closure cl;
17e21a9f 1967 struct keylist parent_keys;
b54d6934
KO
1968
1969 closure_init_stack(&cl);
17e21a9f 1970 bch_keylist_init(&parent_keys);
cafe5635 1971
0a63b66d
KO
1972 if (btree_check_reserve(b, op)) {
1973 if (!b->level)
1974 return -EINTR;
1975 else
1976 WARN(1, "insufficient reserve for split\n");
1977 }
78365411 1978
0a63b66d 1979 n1 = btree_node_alloc_replacement(b, op);
cafe5635
KO
1980 if (IS_ERR(n1))
1981 goto err;
1982
ee811287
KO
1983 split = set_blocks(btree_bset_first(n1),
1984 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
cafe5635 1985
cafe5635
KO
1986 if (split) {
1987 unsigned keys = 0;
1988
ee811287 1989 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
c37511b8 1990
2452cc89 1991 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
cafe5635
KO
1992 if (IS_ERR(n2))
1993 goto err_free1;
1994
d6fd3b11 1995 if (!b->parent) {
2452cc89 1996 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
cafe5635
KO
1997 if (IS_ERR(n3))
1998 goto err_free2;
1999 }
2000
2a285686
KO
2001 mutex_lock(&n1->write_lock);
2002 mutex_lock(&n2->write_lock);
2003
1b207d80 2004 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
cafe5635 2005
d6fd3b11
KO
2006 /*
2007 * Has to be a linear search because we don't have an auxiliary
cafe5635
KO
2008 * search tree yet
2009 */
2010
ee811287
KO
2011 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2012 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
fafff81c 2013 keys));
cafe5635 2014
fafff81c 2015 bkey_copy_key(&n1->key,
ee811287
KO
2016 bset_bkey_idx(btree_bset_first(n1), keys));
2017 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
cafe5635 2018
ee811287
KO
2019 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2020 btree_bset_first(n1)->keys = keys;
cafe5635 2021
ee811287
KO
2022 memcpy(btree_bset_first(n2)->start,
2023 bset_bkey_last(btree_bset_first(n1)),
2024 btree_bset_first(n2)->keys * sizeof(uint64_t));
cafe5635
KO
2025
2026 bkey_copy_key(&n2->key, &b->key);
2027
17e21a9f 2028 bch_keylist_add(&parent_keys, &n2->key);
b54d6934 2029 bch_btree_node_write(n2, &cl);
2a285686 2030 mutex_unlock(&n2->write_lock);
cafe5635 2031 rw_unlock(true, n2);
c37511b8 2032 } else {
ee811287 2033 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
c37511b8 2034
2a285686 2035 mutex_lock(&n1->write_lock);
1b207d80 2036 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
c37511b8 2037 }
cafe5635 2038
17e21a9f 2039 bch_keylist_add(&parent_keys, &n1->key);
b54d6934 2040 bch_btree_node_write(n1, &cl);
2a285686 2041 mutex_unlock(&n1->write_lock);
cafe5635
KO
2042
2043 if (n3) {
d6fd3b11 2044 /* Depth increases, make a new root */
2a285686 2045 mutex_lock(&n3->write_lock);
cafe5635 2046 bkey_copy_key(&n3->key, &MAX_KEY);
17e21a9f 2047 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
b54d6934 2048 bch_btree_node_write(n3, &cl);
2a285686 2049 mutex_unlock(&n3->write_lock);
cafe5635 2050
b54d6934 2051 closure_sync(&cl);
cafe5635
KO
2052 bch_btree_set_root(n3);
2053 rw_unlock(true, n3);
d6fd3b11
KO
2054 } else if (!b->parent) {
2055 /* Root filled up but didn't need to be split */
b54d6934 2056 closure_sync(&cl);
cafe5635
KO
2057 bch_btree_set_root(n1);
2058 } else {
17e21a9f 2059 /* Split a non root node */
b54d6934 2060 closure_sync(&cl);
17e21a9f
KO
2061 make_btree_freeing_key(b, parent_keys.top);
2062 bch_keylist_push(&parent_keys);
2063
17e21a9f
KO
2064 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2065 BUG_ON(!bch_keylist_empty(&parent_keys));
cafe5635
KO
2066 }
2067
05335cff 2068 btree_node_free(b);
cafe5635 2069 rw_unlock(true, n1);
cafe5635 2070
169ef1cf 2071 bch_time_stats_update(&b->c->btree_split_time, start_time);
cafe5635
KO
2072
2073 return 0;
2074err_free2:
5f5837d2 2075 bkey_put(b->c, &n2->key);
e8e1d468 2076 btree_node_free(n2);
cafe5635
KO
2077 rw_unlock(true, n2);
2078err_free1:
5f5837d2 2079 bkey_put(b->c, &n1->key);
e8e1d468 2080 btree_node_free(n1);
cafe5635
KO
2081 rw_unlock(true, n1);
2082err:
0a63b66d 2083 WARN(1, "bcache: btree split failed (level %u)", b->level);
5f5837d2 2084
cafe5635
KO
2085 if (n3 == ERR_PTR(-EAGAIN) ||
2086 n2 == ERR_PTR(-EAGAIN) ||
2087 n1 == ERR_PTR(-EAGAIN))
2088 return -EAGAIN;
2089
cafe5635
KO
2090 return -ENOMEM;
2091}
2092
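/*
 * Insert a keylist into a single node under its write lock.  If the
 * keys don't fit we fall back to btree_split(): -EAGAIN is returned
 * when we're running under generic_make_request (current->bio_list is
 * set), -EINTR when the caller must retraverse with op->lock raised so
 * write locks are taken higher up the tree.
 */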
26c949f8 2093static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
c18536a7 2094 struct keylist *insert_keys,
1b207d80
KO
2095 atomic_t *journal_ref,
2096 struct bkey *replace_key)
cafe5635 2097{
2a285686
KO
2098 struct closure cl;
2099
17e21a9f
KO
2100 BUG_ON(b->level && replace_key);
2101
2a285686
KO
2102 closure_init_stack(&cl);
2103
2104 mutex_lock(&b->write_lock);
2105
2106 if (write_block(b) != btree_bset_last(b) &&
2107 b->keys.last_set_unwritten)
2108 bch_btree_init_next(b); /* just wrote a set */
2109
59158fde 2110 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2a285686
KO
2111 mutex_unlock(&b->write_lock);
2112 goto split;
2113 }
3b3e9e50 2114
2a285686 2115 BUG_ON(write_block(b) != btree_bset_last(b));
cafe5635 2116
2a285686
KO
2117 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2118 if (!b->level)
2119 bch_btree_leaf_dirty(b, journal_ref);
2120 else
2121 bch_btree_node_write(b, &cl);
2122 }
17e21a9f 2123
2a285686
KO
2124 mutex_unlock(&b->write_lock);
2125
2126 /* wait for btree node write if necessary, after unlock */
2127 closure_sync(&cl);
2128
2129 return 0;
2130split:
2131 if (current->bio_list) {
2132 op->lock = b->c->root->level + 1;
2133 return -EAGAIN;
2134 } else if (op->lock <= b->c->root->level) {
2135 op->lock = b->c->root->level + 1;
2136 return -EINTR;
2137 } else {
2138 /* Invalidated all iterators */
2139 int ret = btree_split(b, op, insert_keys, replace_key);
2140
2141 if (bch_keylist_empty(insert_keys))
2142 return 0;
2143 else if (!ret)
2144 return -EINTR;
2145 return ret;
17e21a9f 2146 }
26c949f8 2147}
cafe5635 2148
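/*
 * Insert @check_key with a random pointer flagged PTR_CHECK_DEV.  The
 * node's lock is upgraded to a write lock if the caller only held it
 * for read, and we bail out if the node was rewritten underneath us
 * (its key pointer or sequence number changed).
 */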
e7c590eb
KO
2149int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2150 struct bkey *check_key)
2151{
2152 int ret = -EINTR;
2153 uint64_t btree_ptr = b->key.ptr[0];
2154 unsigned long seq = b->seq;
2155 struct keylist insert;
2156 bool upgrade = op->lock == -1;
2157
2158 bch_keylist_init(&insert);
2159
2160 if (upgrade) {
2161 rw_unlock(false, b);
2162 rw_lock(true, b, b->level);
2163
2164 if (b->key.ptr[0] != btree_ptr ||
2165 b->seq != seq + 1)
2166 goto out;
2167 }
2168
2169 SET_KEY_PTRS(check_key, 1);
2170 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2171
2172 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2173
2174 bch_keylist_add(&insert, check_key);
2175
1b207d80 2176 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
e7c590eb
KO
2177
2178 BUG_ON(!ret && !bch_keylist_empty(&insert));
2179out:
2180 if (upgrade)
2181 downgrade_write(&b->lock);
2182 return ret;
2183}
2184
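/*
 * bch_btree_insert() below drives insertion from the top of the tree:
 * it repeatedly maps btree_insert_fn() over the leaf nodes covering
 * the start of the keylist until the list is drained.  On error the
 * remaining keys are dropped; -ESRCH means a replace_key collision.
 */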
cc7b8819
KO
2185struct btree_insert_op {
2186 struct btree_op op;
2187 struct keylist *keys;
2188 atomic_t *journal_ref;
2189 struct bkey *replace_key;
2190};
cafe5635 2191
08239ca2 2192static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
cc7b8819
KO
2193{
2194 struct btree_insert_op *op = container_of(b_op,
2195 struct btree_insert_op, op);
cafe5635 2196
cc7b8819
KO
2197 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2198 op->journal_ref, op->replace_key);
2199 if (ret && !bch_keylist_empty(op->keys))
2200 return ret;
2201 else
2202 return MAP_DONE;
cafe5635
KO
2203}
2204
cc7b8819
KO
2205int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2206 atomic_t *journal_ref, struct bkey *replace_key)
cafe5635 2207{
cc7b8819 2208 struct btree_insert_op op;
cafe5635 2209 int ret = 0;
cafe5635 2210
cc7b8819 2211 BUG_ON(current->bio_list);
4f3d4014 2212 BUG_ON(bch_keylist_empty(keys));
cafe5635 2213
cc7b8819
KO
2214 bch_btree_op_init(&op.op, 0);
2215 op.keys = keys;
2216 op.journal_ref = journal_ref;
2217 op.replace_key = replace_key;
cafe5635 2218
cc7b8819
KO
2219 while (!ret && !bch_keylist_empty(keys)) {
2220 op.op.lock = 0;
2221 ret = bch_btree_map_leaf_nodes(&op.op, c,
2222 &START_KEY(keys->keys),
2223 btree_insert_fn);
2224 }
cafe5635 2225
cc7b8819
KO
2226 if (ret) {
2227 struct bkey *k;
cafe5635 2228
cc7b8819 2229 pr_err("error %i", ret);
cafe5635 2230
cc7b8819 2231 while ((k = bch_keylist_pop(keys)))
3a3b6a4e 2232 bkey_put(c, k);
cc7b8819
KO
2233 } else if (op.op.insert_collision)
2234 ret = -ESRCH;
6054c6d4 2235
cafe5635
KO
2236 return ret;
2237}
2238
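/*
 * Make @b the new root: it must already be written and its buckets
 * still at BTREE_PRIO.  A journal meta entry is written and waited on
 * so the new root is recorded on disk before we return.
 */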
2239void bch_btree_set_root(struct btree *b)
2240{
2241 unsigned i;
e49c7c37
KO
2242 struct closure cl;
2243
2244 closure_init_stack(&cl);
cafe5635 2245
c37511b8
KO
2246 trace_bcache_btree_set_root(b);
2247
cafe5635
KO
2248 BUG_ON(!b->written);
2249
2250 for (i = 0; i < KEY_PTRS(&b->key); i++)
2251 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2252
2253 mutex_lock(&b->c->bucket_lock);
2254 list_del_init(&b->list);
2255 mutex_unlock(&b->c->bucket_lock);
2256
2257 b->c->root = b;
cafe5635 2258
e49c7c37
KO
2259 bch_journal_meta(b->c, &cl);
2260 closure_sync(&cl);
cafe5635
KO
2261}
2262
48dad8ba
KO
2263/* Map across nodes or keys */
2264
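/*
 * Depth-first walk of the btree starting at @from.  fn is called on
 * every leaf node; when flags is MAP_ALL_NODES it is also called on
 * interior nodes, after their children.  Any return value other than
 * MAP_CONTINUE stops the walk and is propagated to the caller.
 */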
2265static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2266 struct bkey *from,
2267 btree_map_nodes_fn *fn, int flags)
2268{
2269 int ret = MAP_CONTINUE;
2270
2271 if (b->level) {
2272 struct bkey *k;
2273 struct btree_iter iter;
2274
c052dd9a 2275 bch_btree_iter_init(&b->keys, &iter, from);
48dad8ba 2276
a85e968e 2277 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
48dad8ba
KO
2278 bch_ptr_bad))) {
2279 ret = btree(map_nodes_recurse, k, b,
2280 op, from, fn, flags);
2281 from = NULL;
2282
2283 if (ret != MAP_CONTINUE)
2284 return ret;
2285 }
2286 }
2287
2288 if (!b->level || flags == MAP_ALL_NODES)
2289 ret = fn(op, b);
2290
2291 return ret;
2292}
2293
2294int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2295 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2296{
b54d6934 2297 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
48dad8ba
KO
2298}
2299
2300static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2301 struct bkey *from, btree_map_keys_fn *fn,
2302 int flags)
2303{
2304 int ret = MAP_CONTINUE;
2305 struct bkey *k;
2306 struct btree_iter iter;
2307
c052dd9a 2308 bch_btree_iter_init(&b->keys, &iter, from);
48dad8ba 2309
a85e968e 2310 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
48dad8ba
KO
2311 ret = !b->level
2312 ? fn(op, b, k)
2313 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2314 from = NULL;
2315
2316 if (ret != MAP_CONTINUE)
2317 return ret;
2318 }
2319
2320 if (!b->level && (flags & MAP_END_KEY))
2321 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2322 KEY_OFFSET(&b->key), 0));
2323
2324 return ret;
2325}
2326
2327int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2328 struct bkey *from, btree_map_keys_fn *fn, int flags)
2329{
b54d6934 2330 return btree_root(map_keys_recurse, c, op, from, fn, flags);
48dad8ba
KO
2331}
2332
cafe5635
KO
2333/* Keybuf code */
2334
2335static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2336{
2337 /* Overlapping keys compare equal */
2338 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2339 return -1;
2340 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2341 return 1;
2342 return 0;
2343}
2344
2345static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2346 struct keybuf_key *r)
2347{
2348 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2349}
2350
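/*
 * A keybuf is refilled by mapping refill_keybuf_fn() over the keys
 * between last_scanned and the caller's @end: keys matching the
 * caller's predicate are copied into the buffer's rbtree until either
 * @end is reached or the buffer's freelist runs out.
 */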
48dad8ba
KO
2351struct refill {
2352 struct btree_op op;
48a915a8 2353 unsigned nr_found;
48dad8ba
KO
2354 struct keybuf *buf;
2355 struct bkey *end;
2356 keybuf_pred_fn *pred;
2357};
cafe5635 2358
48dad8ba
KO
2359static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2360 struct bkey *k)
2361{
2362 struct refill *refill = container_of(op, struct refill, op);
2363 struct keybuf *buf = refill->buf;
2364 int ret = MAP_CONTINUE;
cafe5635 2365
48dad8ba
KO
2366 if (bkey_cmp(k, refill->end) >= 0) {
2367 ret = MAP_DONE;
2368 goto out;
2369 }
cafe5635 2370
48dad8ba
KO
2371 if (!KEY_SIZE(k)) /* end key */
2372 goto out;
cafe5635 2373
48dad8ba
KO
2374 if (refill->pred(buf, k)) {
2375 struct keybuf_key *w;
cafe5635 2376
48dad8ba 2377 spin_lock(&buf->lock);
cafe5635 2378
48dad8ba
KO
2379 w = array_alloc(&buf->freelist);
2380 if (!w) {
2381 spin_unlock(&buf->lock);
2382 return MAP_DONE;
2383 }
cafe5635 2384
48dad8ba
KO
2385 w->private = NULL;
2386 bkey_copy(&w->key, k);
cafe5635 2387
48dad8ba
KO
2388 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2389 array_free(&buf->freelist, w);
48a915a8
KO
2390 else
2391 refill->nr_found++;
cafe5635 2392
48dad8ba
KO
2393 if (array_freelist_empty(&buf->freelist))
2394 ret = MAP_DONE;
cafe5635 2395
48dad8ba 2396 spin_unlock(&buf->lock);
cafe5635 2397 }
48dad8ba
KO
2398out:
2399 buf->last_scanned = *k;
2400 return ret;
cafe5635
KO
2401}
2402
2403void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
72c27061 2404 struct bkey *end, keybuf_pred_fn *pred)
cafe5635
KO
2405{
2406 struct bkey start = buf->last_scanned;
48dad8ba 2407 struct refill refill;
cafe5635
KO
2408
2409 cond_resched();
2410
b54d6934 2411 bch_btree_op_init(&refill.op, -1);
48a915a8
KO
2412 refill.nr_found = 0;
2413 refill.buf = buf;
2414 refill.end = end;
2415 refill.pred = pred;
48dad8ba
KO
2416
2417 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2418 refill_keybuf_fn, MAP_END_KEY);
cafe5635 2419
48a915a8
KO
2420 trace_bcache_keyscan(refill.nr_found,
2421 KEY_INODE(&start), KEY_OFFSET(&start),
2422 KEY_INODE(&buf->last_scanned),
2423 KEY_OFFSET(&buf->last_scanned));
cafe5635
KO
2424
2425 spin_lock(&buf->lock);
2426
2427 if (!RB_EMPTY_ROOT(&buf->keys)) {
2428 struct keybuf_key *w;
2429 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2430 buf->start = START_KEY(&w->key);
2431
2432 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2433 buf->end = w->key;
2434 } else {
2435 buf->start = MAX_KEY;
2436 buf->end = MAX_KEY;
2437 }
2438
2439 spin_unlock(&buf->lock);
2440}
2441
2442static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2443{
2444 rb_erase(&w->node, &buf->keys);
2445 array_free(&buf->freelist, w);
2446}
2447
2448void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2449{
2450 spin_lock(&buf->lock);
2451 __bch_keybuf_del(buf, w);
2452 spin_unlock(&buf->lock);
2453}
2454
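/*
 * Return true if any buffered key overlapping [start, end) is still in
 * use (w->private set).  Overlapping keys that are not in use are
 * simply dropped from the buffer.
 */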
2455bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2456 struct bkey *end)
2457{
2458 bool ret = false;
2459 struct keybuf_key *p, *w, s;
2460 s.key = *start;
2461
2462 if (bkey_cmp(end, &buf->start) <= 0 ||
2463 bkey_cmp(start, &buf->end) >= 0)
2464 return false;
2465
2466 spin_lock(&buf->lock);
2467 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2468
2469 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2470 p = w;
2471 w = RB_NEXT(w, node);
2472
2473 if (p->private)
2474 ret = true;
2475 else
2476 __bch_keybuf_del(buf, p);
2477 }
2478
2479 spin_unlock(&buf->lock);
2480 return ret;
2481}
2482
2483struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2484{
2485 struct keybuf_key *w;
2486 spin_lock(&buf->lock);
2487
2488 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2489
2490 while (w && w->private)
2491 w = RB_NEXT(w, node);
2492
2493 if (w)
2494 w->private = ERR_PTR(-EINTR);
2495
2496 spin_unlock(&buf->lock);
2497 return w;
2498}
2499
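/*
 * Like bch_keybuf_next(), but refills the buffer from the btree when
 * it runs dry, until last_scanned passes @end.
 */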
2500struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
48dad8ba
KO
2501 struct keybuf *buf,
2502 struct bkey *end,
2503 keybuf_pred_fn *pred)
cafe5635
KO
2504{
2505 struct keybuf_key *ret;
2506
2507 while (1) {
2508 ret = bch_keybuf_next(buf);
2509 if (ret)
2510 break;
2511
2512 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2513 pr_debug("scan finished");
2514 break;
2515 }
2516
72c27061 2517 bch_refill_keybuf(c, buf, end, pred);
cafe5635
KO
2518 }
2519
2520 return ret;
2521}
2522
72c27061 2523void bch_keybuf_init(struct keybuf *buf)
cafe5635 2524{
cafe5635
KO
2525 buf->last_scanned = MAX_KEY;
2526 buf->keys = RB_ROOT;
2527
2528 spin_lock_init(&buf->lock);
2529 array_allocator_init(&buf->freelist);
2530}