/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
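
/*
 * Editorial sketch of the gen counter described above (not part of the
 * original comment): a bucket's contents are implicitly invalidated by
 * bumping bucket->gen, so a pointer is "stale" once the gen embedded in it
 * no longer matches the bucket's, e.g.
 *
 *	stale = bucket->gen - PTR_GEN(k, 0);	// nonzero => pointer is dead
 *
 * Garbage collection later rewrites nodes to drop such stale pointers.
 */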

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Make sure all allocations get charged to the root cgroup
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Also lookup by cgroup in get_open_bucket()
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

static const char * const op_types[] = {
	"insert", "replace"
};

static const char *op_type(struct btree_op *op)
{
	return op_types[op->type];
}

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

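/*
 * Editorial note (inferred): PTR_HASH folds the first pointer's bucket number
 * (its offset shifted down by bucket_bits) together with its gen, so two keys
 * collide only if they name the same bucket at the same generation; it is the
 * lookup key for the in-memory node hash table further below.
 */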
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

struct workqueue_struct *bch_gc_wq;
static struct workqueue_struct *btree_io_wq;

void bch_btree_op_init_stack(struct btree_op *op)
{
	memset(op, 0, sizeof(struct btree_op));
	closure_init_stack(&op->cl);
	op->lock = -1;
	bch_keylist_init(&op->keys);
}

/* Btree key manipulation */

static void bkey_put(struct cache_set *c, struct bkey *k, int level)
{
	if ((level && KEY_OFFSET(k)) || !level)
		__bkey_put(c, k);
}

/* Btree IO */

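/*
 * Editorial note (inferred from the pointer arithmetic below): the checksum
 * is seeded with the node's first pointer and covers the bset starting 8
 * bytes in, i.e. everything except the 64-bit csum field itself.
 */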
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = end(i);

	crc = crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffff;
}

static void btree_bio_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io.cl);

	if (error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE)
				 ? "writing btree" : "reading btree");
	closure_put(cl);
}

static void btree_bio_init(struct btree *b)
{
	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_bio_endio;
	b->bio->bi_private	= &b->io.cl;
}

void bch_btree_read_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct bset *i = b->sets[0].data;
	struct btree_iter *iter = b->c->fill_iter;
	const char *err = "bad btree header";
	BUG_ON(b->nsets || b->written);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;

	mutex_lock(&b->c->fill_lock);
	iter->used = 0;

	if (btree_node_io_error(b) ||
	    !i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(b->c))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->sets[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, end(i));

		b->written += set_blocks(i, b->c);
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     index(i, b) < btree_blocks(b);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->sets[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(b, iter);

	i = b->sets[0].data;
	err = "short btree key";
	if (b->sets[0].size &&
	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
out:
	mutex_unlock(&b->c->fill_lock);

	spin_lock(&b->c->btree_read_time_lock);
	time_stats_update(&b->c->btree_read_time, b->io_start_time);
	spin_unlock(&b->c->btree_read_time_lock);

	smp_wmb(); /* read_done is our write lock */
	set_btree_node_read_done(b);

	closure_return(cl);
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    index(i, b), i->keys);
	goto out;
}

void bch_btree_read(struct btree *b)
{
	BUG_ON(b->nsets || b->written);

	if (!closure_trylock(&b->io.cl, &b->c->cl))
		BUG();

	b->io_start_time = local_clock();

	btree_bio_init(b);
	b->bio->bi_rw	= REQ_META|READ_SYNC;
	b->bio->bi_size	= KEY_SIZE(&b->key) << 9;

	bio_map(b->bio, b->sets[0].data);

	pr_debug("%s", pbtree(b));
	trace_bcache_btree_read(b->bio);
	bch_submit_bbio(b->bio, b->c, &b->key, 0);

	continue_at(&b->io.cl, bch_btree_read_done, system_wq);
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up(&b->c->alloc_wait);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	if (w->owner)
		closure_put(w->owner);

	w->prio_blocked	= 0;
	w->journal	= NULL;
	w->owner	= NULL;
}

static void __btree_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work,
				   msecs_to_jiffies(30000));

	closure_return(cl);
}

static void btree_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io.cl);
	struct bio_vec *bv;
	int n;

	__bio_for_each_segment(bv, b->bio, n, 0)
		__free_page(bv->bv_page);

	__btree_write_done(cl);
}

static void do_btree_write(struct btree *b)
{
	struct closure *cl = &b->io.cl;
	struct bset *i = b->sets[b->nsets].data;
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	btree_bio_init(b);
	b->bio->bi_rw	= REQ_META|WRITE_SYNC;
	b->bio->bi_size	= set_blocks(i, b->c) * block_bytes(b->c);
	bio_map(b->bio, i);

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		trace_bcache_btree_write(b->bio);
		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bio_map(b->bio, i);

		trace_bcache_btree_write(b->bio);
		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		__btree_write_done(cl);
	}
}

static void __btree_write(struct btree *b)
{
	struct bset *i = b->sets[b->nsets].data;

	BUG_ON(current->bio_list);

	closure_lock(&b->io, &b->c->cl);
	cancel_delayed_work(&b->work);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	bch_check_key_order(b, i);
	BUG_ON(b->written && !i->keys);

	do_btree_write(b);

	pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);

	b->written += set_blocks(i, b->c);
	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	bch_btree_sort_lazy(b);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(b);
}

static void btree_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	down_write(&b->lock);

	if (btree_node_dirty(b))
		__btree_write(b);
	up_write(&b->lock);
}

void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
{
	struct bset *i = b->sets[b->nsets].data;
	struct btree_write *w = btree_current_write(b);

	BUG_ON(b->written &&
	       (b->written >= btree_blocks(b) ||
		i->seq != b->sets[0].data->seq ||
		!i->keys));

	if (!btree_node_dirty(b)) {
		set_btree_node_dirty(b);
		queue_delayed_work(btree_io_wq, &b->work,
				   msecs_to_jiffies(30000));
	}

	w->prio_blocked += b->prio_blocked;
	b->prio_blocked	 = 0;

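	/*
	 * Editorial note (inferred): a btree node pins at most one journal
	 * entry per pending write. If this node already holds a pin on an
	 * entry older than op's, the old pin is dropped below so the node
	 * never holds back journal reclaim further than necessary.
	 */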
	if (op && op->journal && !b->level) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w, op)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = op->journal;
			atomic_inc(w->journal);
		}
	}

	if (current->bio_list)
		return;

	/* Force write if set is too big */
	if (now ||
	    b->level ||
	    set_bytes(i) > PAGE_SIZE - 48) {
		if (op && now) {
			/* Must wait on multiple writes */
			BUG_ON(w->owner);
			w->owner = &op->cl;
			closure_get(&op->cl);
		}

		__btree_write(b);
	}
	BUG_ON(!b->written);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

static void mca_reinit(struct btree *b)
{
	unsigned i;

	b->flags	= 0;
	b->written	= 0;
	b->nsets	= 0;

	for (i = 0; i < MAX_BSETS; i++)
		b->sets[i].size = 0;
	/*
	 * Second loop starts at 1 because b->sets[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->sets[i].data = NULL;
}

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
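
/*
 * Worked example of the reserve (editorial, not from the original source):
 * with a three-level btree the root has level 2, so mca_reserve() keeps
 * 2 * 8 + 16 = 32 in-memory nodes that the shrinker may never reclaim; a
 * cache set with no root yet reserves 1 * 8 + 16 = 24.
 */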

static void mca_data_free(struct btree *b)
{
	struct bset_tree *t = b->sets;
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
	list_move(&b->list, &b->c->btree_cache_freed);
	b->c->bucket_cache_used--;
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

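/*
 * Editorial example (assuming 4KiB pages): KEY_SIZE() is in 512-byte sectors,
 * so a btree node spanning 512 sectors (256KiB) is 64 pages, giving a page
 * allocation order of ilog2(64) = 6. The "?: 1" below guards ilog2() against
 * keys smaller than one page.
 */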
static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	struct bset_tree *t = b->sets;
	BUG_ON(t->data);

	b->page_order = max_t(unsigned,
			      ilog2(b->c->btree_pages),
			      btree_order(k));

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	list_move(&b->list, &b->c->btree_cache);
	b->c->bucket_cache_used++;
	return;
err:
	mca_data_free(b);
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_write_work);
	b->c = c;
	closure_init_unlocked(&b->io);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
{
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	if (b->page_order < min_order) {
		rw_unlock(true, b);
		return -ENOMEM;
	}

	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);

	if (cl && btree_node_dirty(b))
		bch_btree_write(b, true, NULL);

	if (cl)
		closure_wait_event_async(&b->io.wait, cl,
			atomic_read(&b->io.cl.remaining) == -1);

	if (btree_node_dirty(b) ||
	    !closure_is_unlocked(&b->io.cl) ||
	    work_pending(&b->work.work)) {
		rw_unlock(true, b);
		return -EAGAIN;
	}

	return 0;
}

static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;

	if (c->shrinker_disabled)
		return 0;

	if (c->try_harder)
		return 0;

	/*
	 * If nr == 0, we're supposed to return the number of items we have
	 * cached. Not allowed to return -1.
	 */
	if (!nr)
		return mca_can_free(c) * c->btree_pages;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_WAIT)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	nr /= c->btree_pages;
	nr  = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (!nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, NULL, 0)) {
			mca_data_free(b);
			rw_unlock(true, b);
			--nr;
		}
	}

	/*
	 * Can happen right when we first start up, before we've read in any
	 * btree nodes
	 */
	if (list_empty(&c->btree_cache))
		goto out;

	for (i = 0; nr && i < c->bucket_cache_used; i++) {
		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, NULL, 0)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			--nr;
		} else
			b->accessed = 0;
	}
out:
	nr = mca_can_free(c) * c->btree_pages;
	mutex_unlock(&c->bucket_lock);
	return nr;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	/* XXX: doesn't check for errors */

	closure_init_unlocked(&c->gc);

	for (i = 0; i < mca_reserve(c); i++)
		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->sets[0].data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.shrink = bch_mca_shrink;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
				     int level, struct closure *cl)
{
	int ret = -ENOMEM;
	struct btree *i;

	if (!cl)
		return ERR_PTR(-ENOMEM);

	/*
	 * Trying to free up some memory - i.e. reuse some btree nodes - may
	 * require initiating IO to flush the dirty part of the node. If we're
	 * running under generic_make_request(), that IO will never finish and
	 * we would deadlock. Returning -EAGAIN causes the cache lookup code to
	 * punt to workqueue and retry.
	 */
	if (current->bio_list)
		return ERR_PTR(-EAGAIN);

	if (c->try_harder && c->try_harder != cl) {
		closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
		return ERR_PTR(-EAGAIN);
	}

	/* XXX: tracepoint */
	c->try_harder = cl;
	c->try_harder_start = local_clock();
retry:
	list_for_each_entry_reverse(i, &c->btree_cache, list) {
		int r = mca_reap(i, cl, btree_order(k));
		if (!r)
			return i;
		if (r != -ENOMEM)
			ret = r;
	}

	if (ret == -EAGAIN &&
	    closure_blocking(cl)) {
		mutex_unlock(&c->bucket_lock);
		closure_sync(cl);
		mutex_lock(&c->bucket_lock);
		goto retry;
	}

	return ERR_PTR(ret);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a
 * time, or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize() takes. This means every time we unlock the root of the
 * btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
{
	if (c->try_harder == cl) {
		time_stats_update(&c->try_harder_time, c->try_harder_start);
		c->try_harder = NULL;
		__closure_wake_up(&c->try_wait);
	}
}

static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
			       int level, struct closure *cl)
{
	struct btree *b;

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, NULL, btree_order(k)))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, NULL, 0)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->sets[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->sets->data)
		goto err;
out:
	BUG_ON(!closure_is_unlocked(&b->io.cl));

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->level = level;

	mca_reinit(b);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, k, level, cl);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
 * if that closure is in non blocking mode, will return -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
				 int level, struct btree_op *op)
{
	int i = 0;
	bool write = level <= op->lock;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, k, level, &op->cl);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->accessed = 1;

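	/*
	 * Editorial note (inferred from the loops below): sets that have an
	 * auxiliary search tree built get both the tree and their data
	 * prefetched; any remaining sets only have data to prefetch.
	 */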
	for (; i <= b->nsets && b->sets[i].size; i++) {
		prefetch(b->sets[i].tree);
		prefetch(b->sets[i].data);
	}

	for (; i <= b->nsets; i++)
		prefetch(b->sets[i].data);

	if (!closure_wait_event(&b->io.wait, &op->cl,
				btree_node_read_done(b))) {
		rw_unlock(write, b);
		b = ERR_PTR(-EAGAIN);
	} else if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		b = ERR_PTR(-EIO);
	} else
		BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	b = mca_alloc(c, k, level, NULL);
	mutex_unlock(&c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		bch_btree_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b, struct btree_op *op)
{
	unsigned i;

	/*
	 * The BUG_ON() in btree_node_get() implies that we must have a write
	 * lock on parent to free or even invalidate a node
	 */
	BUG_ON(op->lock <= b->level);
	BUG_ON(b == b->c->root);
	pr_debug("bucket %s", pbtree(b));

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	if (b->prio_blocked &&
	    !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
		closure_wake_up(&b->c->bucket_wait);

	b->prio_blocked = 0;

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));

		bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
			    PTR_BUCKET(b->c, &b->key, i));
	}

	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
				   struct closure *cl)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
		goto err;

	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, &k.key, level, cl);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		__bkey_put(c, &k.key);
		goto retry;
	}

	set_btree_node_read_done(b);
	b->accessed = 1;
	bch_bset_init_next(b);

	mutex_unlock(&c->bucket_lock);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
	__bkey_put(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);
	return b;
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct closure *cl)
{
	struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
	if (!IS_ERR_OR_NULL(n))
		bch_btree_sort_into(b, n);

	return n;
}

/* Garbage collection */

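/*
 * Editorial sketch of GC marking (inferred from the function below): for each
 * live pointer we pull the bucket's gc_gen back to the pointer's gen, tag the
 * bucket as metadata or dirty data, and accumulate KEY_SIZE() into the
 * bucket's sectors-used count, saturating at the 14-bit field limit.
 */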
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes
	 * as freed, but since ptr_bad() returns true we'll never actually use
	 * them for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->gc_gen, PTR_GEN(k, i)))
			g->gc_gen = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     (1 << 14) - 1));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

static int btree_gc_mark_node(struct btree *b, unsigned *keys,
			      struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned last_dev = -1;
	struct bcache_device *d = NULL;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		if (last_dev != KEY_INODE(k)) {
			last_dev = KEY_INODE(k);

			d = KEY_INODE(k) < b->c->nr_uuids
				? b->c->devices[last_dev]
				: NULL;
		}

		stale = max(stale, btree_mark_key(b, k));

		if (bch_ptr_bad(b, k))
			continue;

		*keys += bkey_u64s(k);

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;

		gc->data += KEY_SIZE(k);
		if (KEY_DIRTY(k)) {
			gc->dirty += KEY_SIZE(k);
			if (d)
				d->sectors_dirty_gc += KEY_SIZE(k);
		}
	}

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(b, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	return stale;
}

static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
				    struct btree_op *op)
{
	/*
	 * We block priorities from being written for the duration of garbage
	 * collection, so we can't sleep in btree_alloc() ->
	 * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
	 * our closure.
	 */
	struct btree *n = btree_node_alloc_replacement(b, NULL);

	if (!IS_ERR_OR_NULL(n)) {
		swap(b, n);

		memcpy(k->ptr, b->key.ptr,
		       sizeof(uint64_t) * KEY_PTRS(&b->key));

		__bkey_put(b->c, &b->key);
		atomic_inc(&b->c->prio_blocked);
		b->prio_blocked++;

		btree_node_free(n, op);
		up_write(&n->lock);
	}

	return b;
}

/*
 * Leaving this at 2 until we've got incremental garbage collection done; it
 * could be higher (and has been tested with 4) except that garbage collection
 * could take much longer, adversely affecting latency.
 */
#define GC_MERGE_NODES	2U
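
/*
 * Editorial example of the coalescing threshold (see btree_gc_coalesce()
 * below): with GC_MERGE_NODES == 2, two adjacent nodes are merged only if
 * their combined keys fit in (2 - 1) node filled to at most 2/3 of
 * btree_default_blocks(), leaving slack for later insertions.
 */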

struct gc_merge_info {
	struct btree	*b;
	struct bkey	*k;
	unsigned	keys;
};

static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
			      struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned nodes = 0, keys = 0, blocks;
	int i;

	while (nodes < GC_MERGE_NODES && r[nodes].b)
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
		return;

	for (i = nodes - 1; i >= 0; --i) {
		if (r[i].b->written)
			r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);

		if (r[i].b->written)
			return;
	}

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = r[i].b->sets->data;
		struct bset *n2 = r[i - 1].b->sets->data;
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i == 1) {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + r->keys,
					 b->c) > btree_blocks(r[i].b))
				return;

			keys = n2->keys;
			last = &r->b->key;
		} else
			for (k = n2->start;
			     k < end(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k), b->c) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}

		BUG_ON(__set_blocks(n1, n1->keys + keys,
				    b->c) > btree_blocks(r[i].b));

		if (last) {
			bkey_copy_key(&r[i].b->key, last);
			bkey_copy_key(r[i].k, last);
		}

		memcpy(end(n1),
		       n2->start,
		       (void *) node(n2, keys) - (void *) n2->start);

		n1->keys += keys;

		memmove(n2->start,
			node(n2, keys),
			(void *) end(n2) - (void *) node(n2, keys));

		n2->keys -= keys;

		r[i].keys	= n1->keys;
		r[i - 1].keys	= n2->keys;
	}

	btree_node_free(r->b, op);
	up_write(&r->b->lock);

	pr_debug("coalesced %u nodes", nodes);

	gc->nodes--;
	nodes--;

	memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
	memset(&r[nodes], 0, sizeof(struct gc_merge_info));
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	void write(struct btree *r)
	{
		if (!r->written)
			bch_btree_write(r, true, op);
		else if (btree_node_dirty(r)) {
			BUG_ON(btree_current_write(r)->owner);
			btree_current_write(r)->owner = writes;
			closure_get(writes);

			bch_btree_write(r, true, NULL);
		}

		up_write(&r->lock);
	}

	int ret = 0, stale;
	unsigned i;
	struct gc_merge_info r[GC_MERGE_NODES];

	memset(r, 0, sizeof(r));

	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);

		if (IS_ERR(r->b)) {
			ret = PTR_ERR(r->b);
			break;
		}

		r->keys = 0;
		stale = btree_gc_mark_node(r->b, &r->keys, gc);

		if (!b->written &&
		    (r->b->level || stale > 10 ||
		     b->c->gc_always_rewrite))
			r->b = btree_gc_alloc(r->b, r->k, op);

		if (r->b->level)
			ret = btree_gc_recurse(r->b, op, writes, gc);

		if (ret) {
			write(r->b);
			break;
		}

		bkey_copy_key(&b->c->gc_done, r->k);

		if (!b->written)
			btree_gc_coalesce(b, op, gc, r);

		if (r[GC_MERGE_NODES - 1].b)
			write(r[GC_MERGE_NODES - 1].b);

		memmove(&r[1], &r[0],
			sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));

		/* When we've got incremental GC working, we'll want to do
		 * if (should_resched())
		 *	return -EAGAIN;
		 */
		cond_resched();
#if 0
		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
#endif
	}

	for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
		write(r[i].b);

	/* Might have freed some children, must remove their keys */
	if (!b->written)
		bch_btree_sort(b);

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	unsigned keys = 0;
	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);

	if (b->level || stale > 10)
		n = btree_node_alloc_replacement(b, NULL);

	if (!IS_ERR_OR_NULL(n))
		swap(b, n);

	if (b->level)
		ret = btree_gc_recurse(b, op, writes, gc);

	if (!b->written || btree_node_dirty(b)) {
		atomic_inc(&b->c->prio_blocked);
		b->prio_blocked++;
		bch_btree_write(b, true, n ? op : NULL);
	}

	if (!IS_ERR_OR_NULL(n)) {
		closure_sync(&op->cl);
		bch_btree_set_root(b);
		btree_node_free(n, op);
		rw_unlock(true, b);
	}

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	struct bcache_device **d;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin))
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		}

	for (d = c->devices;
	     d < c->devices + c->nr_uuids;
	     d++)
		if (*d)
			(*d)->sectors_dirty_gc = 0;

	mutex_unlock(&c->bucket_lock);
}

size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	struct bcache_device **d;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	if (c->root)
		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
				    GC_MARK_METADATA);

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			b->last_gc	= b->gc_gen;
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (!atomic_read(&b->pin) &&
			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
				available++;
				if (!GC_SECTORS_USED(b))
					bch_bucket_add_unused(ca, b);
			}
		}
	}

	for (d = c->devices;
	     d < c->devices + c->nr_uuids;
	     d++)
		if (*d) {
			unsigned long last =
				atomic_long_read(&((*d)->sectors_dirty));
			long difference = (*d)->sectors_dirty_gc - last;

			pr_debug("sectors dirty off by %li", difference);

			(*d)->sectors_dirty_last += difference;

			atomic_long_set(&((*d)->sectors_dirty),
					(*d)->sectors_dirty_gc);
		}

	mutex_unlock(&c->bucket_lock);
	return available;
}

static void bch_btree_gc(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	int ret;
	unsigned long available;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;

	uint64_t start_time = local_clock();
	trace_bcache_gc_start(c->sb.set_uuid);
	blktrace_msg_all(c, "Starting gc");

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	btree_gc_start(c);

	ret = btree_root(gc_root, c, &op, &writes, &stats);
	closure_sync(&op.cl);
	closure_sync(&writes);

	if (ret) {
		blktrace_msg_all(c, "Stopped gc");
		pr_warn("gc failed!");

		continue_at(cl, bch_btree_gc, bch_gc_wq);
	}

	/* Possibly wait for new UUIDs or whatever to hit disk */
	bch_journal_meta(c, &op.cl);
	closure_sync(&op.cl);

	available = bch_btree_gc_finish(c);

	time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.dirty	<<= 9;
	stats.data	<<= 9;
	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
	blktrace_msg_all(c, "Finished gc");

	trace_bcache_gc_end(c->sb.set_uuid);
	wake_up(&c->alloc_wait);
	closure_wake_up(&c->bucket_wait);

	continue_at(cl, bch_moving_gc, bch_gc_wq);
}

void bch_queue_gc(struct cache_set *c)
{
	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
				   unsigned long **seen)
{
	int ret;
	unsigned i;
	struct bkey *k;
	struct bucket *g;
	struct btree_iter iter;

	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
		for (i = 0; i < KEY_PTRS(k); i++) {
			if (!ptr_available(b->c, k, i))
				continue;

			g = PTR_BUCKET(b->c, k, i);

			if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
						seen[PTR_DEV(k, i)]) ||
			    !ptr_stale(b->c, k, i)) {
				g->gen = PTR_GEN(k, i);

				if (b->level)
					g->prio = BTREE_PRIO;
				else if (g->prio == BTREE_PRIO)
					g->prio = INITIAL_PRIO;
			}
		}

		btree_mark_key(b, k);
	}

	if (b->level) {
		k = bch_next_recurse_key(b, &ZERO_KEY);

		while (k) {
			struct bkey *p = bch_next_recurse_key(b, k);
			if (p)
				btree_node_prefetch(b->c, p, b->level - 1);

			ret = btree(check_recurse, k, b, op, seen);
			if (ret)
				return ret;

			k = p;
		}
	}

	return 0;
}

int bch_btree_check(struct cache_set *c, struct btree_op *op)
{
	int ret = -ENOMEM;
	unsigned i;
	unsigned long *seen[MAX_CACHES_PER_SET];

	memset(seen, 0, sizeof(seen));

	for (i = 0; c->cache[i]; i++) {
		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
		seen[i] = kmalloc(n, GFP_KERNEL);
		if (!seen[i])
			goto err;

		/* Disables the seen array until prio_read() uses it too */
		memset(seen[i], 0xFF, n);
	}

	ret = btree_root(check_recurse, c, op, seen);
err:
	for (i = 0; i < MAX_CACHES_PER_SET; i++)
		kfree(seen[i]);
	return ret;
}

/* Btree insertion */

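/*
 * Editorial sketch of shift_keys() below (inferred): the memmove opens a gap
 * of bkey_u64s(insert) u64s at 'where' inside the unwritten bset, e.g.
 *
 *	before:	[k0][k1][k2]		(where points at k1)
 *	after:	[k0][insert][k1][k2]
 *
 * and the lookup table is patched up afterwards so searches stay correct.
 */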
static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
{
	struct bset *i = b->sets[b->nsets].data;

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) end(i) - (void *) where);

	i->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, where);
}

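/*
 * Editorial overview (inferred from the function below): for each existing
 * extent that overlaps the key being inserted there are four cases - the old
 * key is split in the middle, trimmed at the front, trimmed at the back, or
 * completely overwritten - and BTREE_REPLACE additionally verifies the old
 * key matches op->replace before any of them are applied.
 */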
static bool fix_overlapping_extents(struct btree *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct btree_op *op)
{
	void subtract_dirty(struct bkey *k, int sectors)
	{
		struct bcache_device *d = b->c->devices[KEY_INODE(k)];

		if (KEY_DIRTY(k) && d)
			atomic_long_sub(sectors, &d->sectors_dirty);
	}

	unsigned old_size, sectors_found = 0;

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k ||
		    bkey_cmp(&START_KEY(k), insert) >= 0)
			break;

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for BTREE_REPLACE
		 * operations.
		 */

		if (op->type == BTREE_REPLACE &&
		    KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(&op->replace);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(&op->replace) ||
			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(&op->replace));

			for (i = 0; i < KEY_PTRS(&op->replace); i++)
				if (k->ptr[i] != op->replace.ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			subtract_dirty(k, KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, &b->sets[b->nsets],
						      insert);
				shift_keys(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				shift_keys(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			return false;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		subtract_dirty(k, old_size - KEY_SIZE(k));
	}

check_failed:
	if (op->type == BTREE_REPLACE) {
		if (!sectors_found) {
			op->insert_collision = true;
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}

	return false;
}
1795 | ||
1796 | static bool btree_insert_key(struct btree *b, struct btree_op *op, | |
1797 | struct bkey *k) | |
1798 | { | |
1799 | struct bset *i = b->sets[b->nsets].data; | |
1800 | struct bkey *m, *prev; | |
1801 | const char *status = "insert"; | |
1802 | ||
1803 | BUG_ON(bkey_cmp(k, &b->key) > 0); | |
1804 | BUG_ON(b->level && !KEY_PTRS(k)); | |
1805 | BUG_ON(!b->level && !KEY_OFFSET(k)); | |
1806 | ||
1807 | if (!b->level) { | |
1808 | struct btree_iter iter; | |
1809 | struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0); | |
1810 | ||
1811 | /* | |
1812 | * bset_search() returns the first key that is strictly greater | |
1813 | * than the search key - but for back merging, we want to find | |
1814 | * the first key that is greater than or equal to KEY_START(k) - | |
1815 | * unless KEY_START(k) is 0. | |
1816 | */ | |
1817 | if (KEY_OFFSET(&search)) | |
1818 | SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1); | |
1819 | ||
1820 | prev = NULL; | |
1821 | m = bch_btree_iter_init(b, &iter, &search); | |
1822 | ||
1823 | if (fix_overlapping_extents(b, k, &iter, op)) | |
1824 | return false; | |
1825 | ||
1826 | while (m != end(i) && | |
1827 | bkey_cmp(k, &START_KEY(m)) > 0) | |
1828 | prev = m, m = bkey_next(m); | |
1829 | ||
1830 | if (key_merging_disabled(b->c)) | |
1831 | goto insert; | |
1832 | ||
1833 | /* prev is in the tree, if we merge we're done */ | |
1834 | status = "back merging"; | |
1835 | if (prev && | |
1836 | bch_bkey_try_merge(b, prev, k)) | |
1837 | goto merged; | |
1838 | ||
1839 | status = "overwrote front"; | |
1840 | if (m != end(i) && | |
1841 | KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) | |
1842 | goto copy; | |
1843 | ||
1844 | status = "front merge"; | |
1845 | if (m != end(i) && | |
1846 | bch_bkey_try_merge(b, k, m)) | |
1847 | goto copy; | |
1848 | } else | |
1849 | m = bch_bset_search(b, &b->sets[b->nsets], k); | |
1850 | ||
1851 | insert: shift_keys(b, m, k); | |
1852 | copy: bkey_copy(m, k); | |
1853 | merged: | |
1854 | bch_check_keys(b, "%s for %s at %s: %s", status, | |
1855 | op_type(op), pbtree(b), pkey(k)); | |
1856 | bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status, | |
1857 | op_type(op), pbtree(b), pkey(k)); | |
1858 | ||
1859 | if (b->level && !KEY_OFFSET(k)) | |
1860 | b->prio_blocked++; | |
1861 | ||
1862 | pr_debug("%s for %s at %s: %s", status, | |
1863 | op_type(op), pbtree(b), pkey(k)); | |
1864 | ||
1865 | return true; | |
1866 | } | |
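/*
 * Illustrative sketch (example_mergeable() is not a real bcache
 * symbol): the geometric precondition behind both merge attempts above
 * is that the left extent ends exactly where the right one begins, on
 * the same inode.  bch_bkey_try_merge() still has to verify, among
 * other things, that the data pointers are contiguous on disk and that
 * the combined size fits in a key, so this is only the first half of
 * the test:
 */
static inline bool example_mergeable(const struct bkey *l,
				     const struct bkey *r)
{
	return KEY_INODE(l) == KEY_INODE(r) &&
		KEY_OFFSET(l) == KEY_START(r);
}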
1867 | ||
1868 | bool bch_btree_insert_keys(struct btree *b, struct btree_op *op) | |
1869 | { | |
1870 | bool ret = false; | |
1871 | struct bkey *k; | |
1872 | unsigned oldsize = bch_count_data(b); | |
1873 | ||
1874 | while ((k = bch_keylist_pop(&op->keys))) { | |
1875 | bkey_put(b->c, k, b->level); | |
1876 | ret |= btree_insert_key(b, op, k); | |
1877 | } | |
1878 | ||
1879 | BUG_ON(bch_count_data(b) < oldsize); | |
1880 | return ret; | |
1881 | } | |
1882 | ||
1883 | bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op, | |
1884 | struct bio *bio) | |
1885 | { | |
1886 | bool ret = false; | |
1887 | uint64_t btree_ptr = b->key.ptr[0]; | |
1888 | unsigned long seq = b->seq; | |
1889 | BKEY_PADDED(k) tmp; | |
1890 | ||
1891 | rw_unlock(false, b); | |
1892 | rw_lock(true, b, b->level); | |
1893 | ||
1894 | if (b->key.ptr[0] != btree_ptr || | |
1895 | b->seq != seq + 1 || | |
1896 | should_split(b)) | |
1897 | goto out; | |
1898 | ||
1899 | op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio)); | |
1900 | ||
1901 | SET_KEY_PTRS(&op->replace, 1); | |
1902 | get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t)); | |
1903 | ||
1904 | SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV); | |
1905 | ||
1906 | bkey_copy(&tmp.k, &op->replace); | |
1907 | ||
1908 | BUG_ON(op->type != BTREE_INSERT); | |
1909 | BUG_ON(!btree_insert_key(b, op, &tmp.k)); | |
1910 | bch_btree_write(b, false, NULL); | |
1911 | ret = true; | |
1912 | out: | |
1913 | downgrade_write(&b->lock); | |
1914 | return ret; | |
1915 | } | |
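/*
 * The function above is an optimistic lock upgrade: the read lock is
 * dropped before the write lock is taken, so the node can change in the
 * window.  Since acquiring the write lock advances b->seq, seeing
 * exactly seq + 1 afterwards means our own relock was the only write
 * access since we looked; if the pointer, the sequence number or the
 * fullness check fails we return false and leave it to the caller to
 * retry the lookup.
 */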
1916 | ||
1917 | static int btree_split(struct btree *b, struct btree_op *op) | |
1918 | { | |
1919 | bool split, root = b == b->c->root; | |
1920 | struct btree *n1, *n2 = NULL, *n3 = NULL; | |
1921 | uint64_t start_time = local_clock(); | |
1922 | ||
1923 | if (b->level) | |
1924 | set_closure_blocking(&op->cl); | |
1925 | ||
1926 | n1 = btree_node_alloc_replacement(b, &op->cl); | |
1927 | if (IS_ERR(n1)) | |
1928 | goto err; | |
1929 | ||
1930 | split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5; | |
1931 | ||
1932 | pr_debug("%ssplitting at %s keys %i", split ? "" : "not ", | |
1933 | pbtree(b), n1->sets[0].data->keys); | |
1934 | ||
1935 | if (split) { | |
1936 | unsigned keys = 0; | |
1937 | ||
1938 | n2 = bch_btree_node_alloc(b->c, b->level, &op->cl); | |
1939 | if (IS_ERR(n2)) | |
1940 | goto err_free1; | |
1941 | ||
1942 | if (root) { | |
1943 | n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl); | |
1944 | if (IS_ERR(n3)) | |
1945 | goto err_free2; | |
1946 | } | |
1947 | ||
1948 | bch_btree_insert_keys(n1, op); | |
1949 | ||
1950 | /* Has to be a linear search because we don't have an auxiliary | |
1951 | * search tree yet | |
1952 | */ | |
1953 | ||
1954 | while (keys < (n1->sets[0].data->keys * 3) / 5) | |
1955 | keys += bkey_u64s(node(n1->sets[0].data, keys)); | |
1956 | ||
1957 | bkey_copy_key(&n1->key, node(n1->sets[0].data, keys)); | |
1958 | keys += bkey_u64s(node(n1->sets[0].data, keys)); | |
1959 | ||
1960 | n2->sets[0].data->keys = n1->sets[0].data->keys - keys; | |
1961 | n1->sets[0].data->keys = keys; | |
1962 | ||
1963 | memcpy(n2->sets[0].data->start, | |
1964 | end(n1->sets[0].data), | |
1965 | n2->sets[0].data->keys * sizeof(uint64_t)); | |
1966 | ||
1967 | bkey_copy_key(&n2->key, &b->key); | |
1968 | ||
1969 | bch_keylist_add(&op->keys, &n2->key); | |
1970 | bch_btree_write(n2, true, op); | |
1971 | rw_unlock(true, n2); | |
1972 | } else | |
1973 | bch_btree_insert_keys(n1, op); | |
1974 | ||
1975 | bch_keylist_add(&op->keys, &n1->key); | |
1976 | bch_btree_write(n1, true, op); | |
1977 | ||
1978 | if (n3) { | |
1979 | bkey_copy_key(&n3->key, &MAX_KEY); | |
1980 | bch_btree_insert_keys(n3, op); | |
1981 | bch_btree_write(n3, true, op); | |
1982 | ||
1983 | closure_sync(&op->cl); | |
1984 | bch_btree_set_root(n3); | |
1985 | rw_unlock(true, n3); | |
1986 | } else if (root) { | |
1987 | op->keys.top = op->keys.bottom; | |
1988 | closure_sync(&op->cl); | |
1989 | bch_btree_set_root(n1); | |
1990 | } else { | |
1991 | unsigned i; | |
1992 | ||
1993 | bkey_copy(op->keys.top, &b->key); | |
1994 | bkey_copy_key(op->keys.top, &ZERO_KEY); | |
1995 | ||
1996 | for (i = 0; i < KEY_PTRS(&b->key); i++) { | |
1997 | uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1; | |
1998 | ||
1999 | SET_PTR_GEN(op->keys.top, i, g); | |
2000 | } | |
2001 | ||
2002 | bch_keylist_push(&op->keys); | |
2003 | closure_sync(&op->cl); | |
2004 | atomic_inc(&b->c->prio_blocked); | |
2005 | } | |
2006 | ||
2007 | rw_unlock(true, n1); | |
2008 | btree_node_free(b, op); | |
2009 | ||
2010 | time_stats_update(&b->c->btree_split_time, start_time); | |
2011 | ||
2012 | return 0; | |
2013 | err_free2: | |
2014 | __bkey_put(n2->c, &n2->key); | |
2015 | btree_node_free(n2, op); | |
2016 | rw_unlock(true, n2); | |
2017 | err_free1: | |
2018 | __bkey_put(n1->c, &n1->key); | |
2019 | btree_node_free(n1, op); | |
2020 | rw_unlock(true, n1); | |
2021 | err: | |
2022 | if (n3 == ERR_PTR(-EAGAIN) || | |
2023 | n2 == ERR_PTR(-EAGAIN) || | |
2024 | n1 == ERR_PTR(-EAGAIN)) | |
2025 | return -EAGAIN; | |
2026 | ||
2027 | pr_warn("couldn't split"); | |
2028 | return -ENOMEM; | |
2029 | } | |
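/*
 * Sketch of the two ratios driving btree_split() above, pulled out with
 * invented names (the example_* helpers are not part of bcache).  A
 * replacement node splits once its single sorted set fills more than
 * 4/5 of the node; on a split the first child keeps roughly 3/5 of the
 * key space, counted in u64s, the unit struct bset uses for ->keys, so
 * both children end up with room to grow:
 */
static inline bool example_should_split(unsigned set_blocks,
					unsigned node_blocks)
{
	return set_blocks > (node_blocks * 4) / 5;
}

static inline unsigned example_u64s_kept_in_n1(unsigned total_u64s)
{
	return (total_u64s * 3) / 5;
}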
2030 | ||
2031 | static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op, | |
2032 | struct keylist *stack_keys) | |
2033 | { | |
2034 | if (b->level) { | |
2035 | int ret; | |
2036 | struct bkey *insert = op->keys.bottom; | |
2037 | struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert)); | |
2038 | ||
2039 | if (!k) { | |
2040 | btree_bug(b, "no key to recurse on at level %i/%i", | |
2041 | b->level, b->c->root->level); | |
2042 | ||
2043 | op->keys.top = op->keys.bottom; | |
2044 | return -EIO; | |
2045 | } | |
2046 | ||
2047 | if (bkey_cmp(insert, k) > 0) { | |
2048 | unsigned i; | |
2049 | ||
2050 | if (op->type == BTREE_REPLACE) { | |
2051 | __bkey_put(b->c, insert); | |
2052 | op->keys.top = op->keys.bottom; | |
2053 | op->insert_collision = true; | |
2054 | return 0; | |
2055 | } | |
2056 | ||
2057 | for (i = 0; i < KEY_PTRS(insert); i++) | |
2058 | atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin); | |
2059 | ||
2060 | bkey_copy(stack_keys->top, insert); | |
2061 | ||
2062 | bch_cut_back(k, insert); | |
2063 | bch_cut_front(k, stack_keys->top); | |
2064 | ||
2065 | bch_keylist_push(stack_keys); | |
2066 | } | |
2067 | ||
2068 | ret = btree(insert_recurse, k, b, op, stack_keys); | |
2069 | if (ret) | |
2070 | return ret; | |
2071 | } | |
2072 | ||
2073 | if (!bch_keylist_empty(&op->keys)) { | |
2074 | if (should_split(b)) { | |
2075 | if (op->lock <= b->c->root->level) { | |
2076 | BUG_ON(b->level); | |
2077 | op->lock = b->c->root->level + 1; | |
2078 | return -EINTR; | |
2079 | } | |
2080 | return btree_split(b, op); | |
2081 | } | |
2082 | ||
2083 | BUG_ON(write_block(b) != b->sets[b->nsets].data); | |
2084 | ||
2085 | if (bch_btree_insert_keys(b, op)) | |
2086 | bch_btree_write(b, false, op); | |
2087 | } | |
2088 | ||
2089 | return 0; | |
2090 | } | |
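/*
 * Worked example for the key-splitting branch above: if op->keys.bottom
 * is the extent [10, 50) but the child key k only covers up to sector
 * 30 (so bkey_cmp(insert, k) > 0), the buckets are pinned, the key is
 * copied, and then bch_cut_back(k, insert) shrinks the part sent down
 * this child to [10, 30) while bch_cut_front(k, stack_keys->top) leaves
 * [30, 50) on stack_keys to be re-driven from the root by
 * bch_btree_insert().
 */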
2091 | ||
2092 | int bch_btree_insert(struct btree_op *op, struct cache_set *c) | |
2093 | { | |
2094 | int ret = 0; | |
2095 | struct keylist stack_keys; | |
2096 | ||
2097 | /* | |
2098 | * Don't want to block with the btree locked unless we have to, | |
2099 |  * otherwise we get deadlocks: against try_harder, and between split and gc | |
2100 | */ | |
2101 | clear_closure_blocking(&op->cl); | |
2102 | ||
2103 | BUG_ON(bch_keylist_empty(&op->keys)); | |
2104 | bch_keylist_copy(&stack_keys, &op->keys); | |
2105 | bch_keylist_init(&op->keys); | |
2106 | ||
2107 | while (!bch_keylist_empty(&stack_keys) || | |
2108 | !bch_keylist_empty(&op->keys)) { | |
2109 | if (bch_keylist_empty(&op->keys)) { | |
2110 | bch_keylist_add(&op->keys, | |
2111 | bch_keylist_pop(&stack_keys)); | |
2112 | op->lock = 0; | |
2113 | } | |
2114 | ||
2115 | ret = btree_root(insert_recurse, c, op, &stack_keys); | |
2116 | ||
2117 | if (ret == -EAGAIN) { | |
2118 | ret = 0; | |
2119 | closure_sync(&op->cl); | |
2120 | } else if (ret) { | |
2121 | struct bkey *k; | |
2122 | ||
2123 | pr_err("error %i trying to insert key for %s", | |
2124 | ret, op_type(op)); | |
2125 | ||
2126 | while ((k = bch_keylist_pop(&stack_keys) ?: | |
2127 | bch_keylist_pop(&op->keys))) | |
2128 | bkey_put(c, k, 0); | |
2129 | } | |
2130 | } | |
2131 | ||
2132 | bch_keylist_free(&stack_keys); | |
2133 | ||
2134 | if (op->journal) | |
2135 | atomic_dec_bug(op->journal); | |
2136 | op->journal = NULL; | |
2137 | return ret; | |
2138 | } | |
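/*
 * Error contract of the loop above: -EAGAIN means we were waiting on an
 * allocation, so closure_sync() and go around again; -EINTR (raised in
 * bch_btree_insert_recurse() after bumping op->lock) is retried by the
 * btree_root() machinery with the stronger lock; anything else drops
 * every key remaining on both keylists.
 */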
2139 | ||
2140 | void bch_btree_set_root(struct btree *b) | |
2141 | { | |
2142 | unsigned i; | |
2143 | ||
2144 | BUG_ON(!b->written); | |
2145 | ||
2146 | for (i = 0; i < KEY_PTRS(&b->key); i++) | |
2147 | BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); | |
2148 | ||
2149 | mutex_lock(&b->c->bucket_lock); | |
2150 | list_del_init(&b->list); | |
2151 | mutex_unlock(&b->c->bucket_lock); | |
2152 | ||
2153 | b->c->root = b; | |
2154 | __bkey_put(b->c, &b->key); | |
2155 | ||
2156 | bch_journal_meta(b->c, NULL); | |
2157 | pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0)); | |
2158 | } | |
2159 | ||
2160 | /* Cache lookup */ | |
2161 | ||
2162 | static int submit_partial_cache_miss(struct btree *b, struct btree_op *op, | |
2163 | struct bkey *k) | |
2164 | { | |
2165 | struct search *s = container_of(op, struct search, op); | |
2166 | struct bio *bio = &s->bio.bio; | |
2167 | int ret = 0; | |
2168 | ||
2169 | while (!ret && | |
2170 | !op->lookup_done) { | |
2171 | unsigned sectors = INT_MAX; | |
2172 | ||
2173 | if (KEY_INODE(k) == op->inode) { | |
2174 | if (KEY_START(k) <= bio->bi_sector) | |
2175 | break; | |
2176 | ||
2177 | sectors = min_t(uint64_t, sectors, | |
2178 | KEY_START(k) - bio->bi_sector); | |
2179 | } | |
2180 | ||
2181 | ret = s->d->cache_miss(b, s, bio, sectors); | |
2182 | } | |
2183 | ||
2184 | return ret; | |
2185 | } | |
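/*
 * Worked example: with bio->bi_sector == 100 and the next cached key
 * for this inode starting at sector 150, the loop above asks
 * cache_miss() for 150 - 100 == 50 sectors from the backing device;
 * when the key belongs to a different inode nothing caps the size and
 * the whole remainder of the bio is treated as one miss.
 */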
2186 | ||
2187 | /* | |
2188 | * Read from a single key, handling the initial cache miss if the key starts in | |
2189 | * the middle of the bio | |
2190 | */ | |
2191 | static int submit_partial_cache_hit(struct btree *b, struct btree_op *op, | |
2192 | struct bkey *k) | |
2193 | { | |
2194 | struct search *s = container_of(op, struct search, op); | |
2195 | struct bio *bio = &s->bio.bio; | |
2196 | unsigned ptr; | |
2197 | struct bio *n; | |
2198 | ||
2199 | int ret = submit_partial_cache_miss(b, op, k); | |
2200 | if (ret || op->lookup_done) | |
2201 | return ret; | |
2202 | ||
2203 | /* XXX: figure out best pointer - for multiple cache devices */ | |
2204 | ptr = 0; | |
2205 | ||
2206 | PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; | |
2207 | ||
2208 | while (!op->lookup_done && | |
2209 | KEY_INODE(k) == op->inode && | |
2210 | bio->bi_sector < KEY_OFFSET(k)) { | |
2211 | struct bkey *bio_key; | |
2212 | sector_t sector = PTR_OFFSET(k, ptr) + | |
2213 | (bio->bi_sector - KEY_START(k)); | |
2214 | unsigned sectors = min_t(uint64_t, INT_MAX, | |
2215 | KEY_OFFSET(k) - bio->bi_sector); | |
2216 | ||
2217 | n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); | |
2218 | if (!n) | |
2219 | return -EAGAIN; | |
2220 | ||
2221 | if (n == bio) | |
2222 | op->lookup_done = true; | |
2223 | ||
2224 | bio_key = &container_of(n, struct bbio, bio)->key; | |
2225 | ||
2226 | /* | |
2227 | * The bucket we're reading from might be reused while our bio | |
2228 | * is in flight, and we could then end up reading the wrong | |
2229 | * data. | |
2230 | * | |
2231 | * We guard against this by checking (in cache_read_endio()) if | |
2232 | * the pointer is stale again; if so, we treat it as an error | |
2233 | * and reread from the backing device (but we don't pass that | |
2234 | * error up anywhere). | |
2235 | */ | |
2236 | ||
2237 | bch_bkey_copy_single_ptr(bio_key, k, ptr); | |
2238 | SET_PTR_OFFSET(bio_key, 0, sector); | |
2239 | ||
2240 | n->bi_end_io = bch_cache_read_endio; | |
2241 | n->bi_private = &s->cl; | |
2242 | ||
2243 | trace_bcache_cache_hit(n); | |
2244 | __bch_submit_bbio(n, b->c); | |
2245 | } | |
2246 | ||
2247 | return 0; | |
2248 | } | |
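/*
 * Worked example of the remap above: for a key caching backing sectors
 * [100, 200) with PTR_OFFSET(k, ptr) == 5000, a bio sitting at
 * bi_sector 120 is read from cache sector 5000 + (120 - 100) == 5020,
 * for at most KEY_OFFSET(k) - 120 == 80 sectors before the search moves
 * on to the next key.
 */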
2249 | ||
2250 | int bch_btree_search_recurse(struct btree *b, struct btree_op *op) | |
2251 | { | |
2252 | struct search *s = container_of(op, struct search, op); | |
2253 | struct bio *bio = &s->bio.bio; | |
2254 | ||
2255 | int ret = 0; | |
2256 | struct bkey *k; | |
2257 | struct btree_iter iter; | |
2258 | bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0)); | |
2259 | ||
2260 | pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode, | |
2261 | (uint64_t) bio->bi_sector); | |
2262 | ||
2263 | do { | |
2264 | k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); | |
2265 | if (!k) { | |
2266 | /* | |
2267 | * b->key would be exactly what we want, except that | |
2268 | * pointers to btree nodes have nonzero size - we | |
2269 | * wouldn't go far enough | |
2270 | */ | |
2271 | ||
2272 | ret = submit_partial_cache_miss(b, op, | |
2273 | &KEY(KEY_INODE(&b->key), | |
2274 | KEY_OFFSET(&b->key), 0)); | |
2275 | break; | |
2276 | } | |
2277 | ||
2278 | ret = b->level | |
2279 | ? btree(search_recurse, k, b, op) | |
2280 | : submit_partial_cache_hit(b, op, k); | |
2281 | } while (!ret && | |
2282 | !op->lookup_done); | |
2283 | ||
2284 | return ret; | |
2285 | } | |
2286 | ||
2287 | /* Keybuf code */ | |
2288 | ||
2289 | static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) | |
2290 | { | |
2291 | /* Overlapping keys compare equal */ | |
2292 | if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) | |
2293 | return -1; | |
2294 | if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) | |
2295 | return 1; | |
2296 | return 0; | |
2297 | } | |
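/*
 * Because overlapping keys compare equal here, RB_INSERT() with this
 * comparator refuses to add a key that overlaps one already buffered,
 * which is why the refill path below can simply array_free() the newly
 * allocated entry when RB_INSERT() reports a duplicate.
 */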
2298 | ||
2299 | static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, | |
2300 | struct keybuf_key *r) | |
2301 | { | |
2302 | return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); | |
2303 | } | |
2304 | ||
2305 | static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, | |
2306 | struct keybuf *buf, struct bkey *end) | |
2307 | { | |
2308 | struct btree_iter iter; | |
2309 | bch_btree_iter_init(b, &iter, &buf->last_scanned); | |
2310 | ||
2311 | while (!array_freelist_empty(&buf->freelist)) { | |
2312 | struct bkey *k = bch_btree_iter_next_filter(&iter, b, | |
2313 | bch_ptr_bad); | |
2314 | ||
2315 | if (!b->level) { | |
2316 | if (!k) { | |
2317 | buf->last_scanned = b->key; | |
2318 | break; | |
2319 | } | |
2320 | ||
2321 | buf->last_scanned = *k; | |
2322 | if (bkey_cmp(&buf->last_scanned, end) >= 0) | |
2323 | break; | |
2324 | ||
2325 | if (buf->key_predicate(buf, k)) { | |
2326 | struct keybuf_key *w; | |
2327 | ||
2328 | pr_debug("%s", pkey(k)); | |
2329 | ||
2330 | spin_lock(&buf->lock); | |
2331 | ||
2332 | w = array_alloc(&buf->freelist); | |
2333 | ||
2334 | w->private = NULL; | |
2335 | bkey_copy(&w->key, k); | |
2336 | ||
2337 | if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) | |
2338 | array_free(&buf->freelist, w); | |
2339 | ||
2340 | spin_unlock(&buf->lock); | |
2341 | } | |
2342 | } else { | |
2343 | if (!k) | |
2344 | break; | |
2345 | ||
2346 | btree(refill_keybuf, k, b, op, buf, end); | |
2347 | /* | |
2348 | * Might get an error here, but can't really do anything | |
2349 | * and it'll get logged elsewhere. Just read what we | |
2350 | * can. | |
2351 | */ | |
2352 | ||
2353 | if (bkey_cmp(&buf->last_scanned, end) >= 0) | |
2354 | break; | |
2355 | ||
2356 | cond_resched(); | |
2357 | } | |
2358 | } | |
2359 | ||
2360 | return 0; | |
2361 | } | |
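/*
 * The scan above is resumable: buf->last_scanned advances at the leaf
 * level as keys are visited, so once the freelist runs out the next
 * refill picks up exactly where this one stopped instead of rescanning
 * from the start of the keyspace.
 */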
2362 | ||
2363 | void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, | |
2364 | struct bkey *end) | |
2365 | { | |
2366 | struct bkey start = buf->last_scanned; | |
2367 | struct btree_op op; | |
2368 | bch_btree_op_init_stack(&op); | |
2369 | ||
2370 | cond_resched(); | |
2371 | ||
2372 | btree_root(refill_keybuf, c, &op, buf, end); | |
2373 | closure_sync(&op.cl); | |
2374 | ||
2375 | pr_debug("found %s keys from %llu:%llu to %llu:%llu", | |
2376 | RB_EMPTY_ROOT(&buf->keys) ? "no" : | |
2377 | array_freelist_empty(&buf->freelist) ? "some" : "a few", | |
2378 | KEY_INODE(&start), KEY_OFFSET(&start), | |
2379 | KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned)); | |
2380 | ||
2381 | spin_lock(&buf->lock); | |
2382 | ||
2383 | if (!RB_EMPTY_ROOT(&buf->keys)) { | |
2384 | struct keybuf_key *w; | |
2385 | w = RB_FIRST(&buf->keys, struct keybuf_key, node); | |
2386 | buf->start = START_KEY(&w->key); | |
2387 | ||
2388 | w = RB_LAST(&buf->keys, struct keybuf_key, node); | |
2389 | buf->end = w->key; | |
2390 | } else { | |
2391 | buf->start = MAX_KEY; | |
2392 | buf->end = MAX_KEY; | |
2393 | } | |
2394 | ||
2395 | spin_unlock(&buf->lock); | |
2396 | } | |
2397 | ||
2398 | static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) | |
2399 | { | |
2400 | rb_erase(&w->node, &buf->keys); | |
2401 | array_free(&buf->freelist, w); | |
2402 | } | |
2403 | ||
2404 | void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) | |
2405 | { | |
2406 | spin_lock(&buf->lock); | |
2407 | __bch_keybuf_del(buf, w); | |
2408 | spin_unlock(&buf->lock); | |
2409 | } | |
2410 | ||
2411 | bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, | |
2412 | struct bkey *end) | |
2413 | { | |
2414 | bool ret = false; | |
2415 | struct keybuf_key *p, *w, s; | |
2416 | s.key = *start; | |
2417 | ||
2418 | if (bkey_cmp(end, &buf->start) <= 0 || | |
2419 | bkey_cmp(start, &buf->end) >= 0) | |
2420 | return false; | |
2421 | ||
2422 | spin_lock(&buf->lock); | |
2423 | w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); | |
2424 | ||
2425 | while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) { | |
2426 | p = w; | |
2427 | w = RB_NEXT(w, node); | |
2428 | ||
2429 | if (p->private) | |
2430 | ret = true; | |
2431 | else | |
2432 | __bch_keybuf_del(buf, p); | |
2433 | } | |
2434 | ||
2435 | spin_unlock(&buf->lock); | |
2436 | return ret; | |
2437 | } | |
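/*
 * Usage sketch (back_off_or_wait() is an invented placeholder): a
 * writer about to dirty a range is expected to do something like
 *
 *	if (bch_keybuf_check_overlapping(buf, &start, &end))
 *		back_off_or_wait();
 *
 * Overlapping keys nobody has claimed (w->private == NULL) are dropped
 * from the buffer on the spot so they are never acted on with stale
 * contents; claimed ones make the function return true instead.
 */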
2438 | ||
2439 | struct keybuf_key *bch_keybuf_next(struct keybuf *buf) | |
2440 | { | |
2441 | struct keybuf_key *w; | |
2442 | spin_lock(&buf->lock); | |
2443 | ||
2444 | w = RB_FIRST(&buf->keys, struct keybuf_key, node); | |
2445 | ||
2446 | while (w && w->private) | |
2447 | w = RB_NEXT(w, node); | |
2448 | ||
2449 | if (w) | |
2450 | w->private = ERR_PTR(-EINTR); | |
2451 | ||
2452 | spin_unlock(&buf->lock); | |
2453 | return w; | |
2454 | } | |
2455 | ||
2456 | struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, | |
2457 | struct keybuf *buf, | |
2458 | struct bkey *end) | |
2459 | { | |
2460 | struct keybuf_key *ret; | |
2461 | ||
2462 | while (1) { | |
2463 | ret = bch_keybuf_next(buf); | |
2464 | if (ret) | |
2465 | break; | |
2466 | ||
2467 | if (bkey_cmp(&buf->last_scanned, end) >= 0) { | |
2468 | pr_debug("scan finished"); | |
2469 | break; | |
2470 | } | |
2471 | ||
2472 | bch_refill_keybuf(c, buf, end); | |
2473 | } | |
2474 | ||
2475 | return ret; | |
2476 | } | |
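/*
 * Usage sketch (do_io_for() is an invented placeholder) of a typical
 * consumer loop over the helpers above:
 *
 *	struct keybuf_key *w;
 *
 *	while ((w = bch_keybuf_next_rescan(c, buf, &MAX_KEY))) {
 *		do_io_for(w);
 *		bch_keybuf_del(buf, w);
 *	}
 *
 * bch_keybuf_next() claims each key it hands out by making ->private
 * non-NULL, so a key goes to one consumer at a time until
 * bch_keybuf_del() releases it.
 */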
2477 | ||
2478 | void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn) | |
2479 | { | |
2480 | buf->key_predicate = fn; | |
2481 | buf->last_scanned = MAX_KEY; | |
2482 | buf->keys = RB_ROOT; | |
2483 | ||
2484 | spin_lock_init(&buf->lock); | |
2485 | array_allocator_init(&buf->freelist); | |
2486 | } | |
2487 | ||
2488 | void bch_btree_exit(void) | |
2489 | { | |
2490 | if (btree_io_wq) | |
2491 | destroy_workqueue(btree_io_wq); | |
2492 | if (bch_gc_wq) | |
2493 | destroy_workqueue(bch_gc_wq); | |
2494 | } | |
2495 | ||
2496 | int __init bch_btree_init(void) | |
2497 | { | |
2498 | if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) || | |
2499 | !(btree_io_wq = create_singlethread_workqueue("bch_btree_io"))) | |
2500 | return -ENOMEM; | |
2501 | ||
2502 | return 0; | |
2503 | } |
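/*
 * If the second create_singlethread_workqueue() call fails the first
 * workqueue is not torn down here; bch_btree_exit() NULL-checks both
 * pointers, so the module init error path is expected to call it for
 * cleanup.
 */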