bcache: Pull on disk data structures out into a separate header
drivers/md/bcache/bcache.h
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the priority
 * of each bucket. It could be used to implement something more sophisticated,
 * if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 * and it puts more strain on the various btree resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */
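
/*
 * A sketch of the index described above (illustrative only; the accessors
 * are the bkey macros from <linux/bcache.h>, the concrete numbers are made
 * up): an extent of 8 sectors ending at offset 128 of inode 5, cached on
 * cache device 0, would decode roughly as
 *
 *	KEY_INODE(&k) == 5, KEY_OFFSET(&k) == 128, KEY_SIZE(&k) == 8
 *	PTR_DEV(&k, 0) == 0, PTR_OFFSET(&k, 0) == <sector in the cache>,
 *	PTR_GEN(&k, 0) == <gen that must match the bucket's gen>
 *
 * i.e. the offset stored is the end of the extent, and the pointer stays
 * valid only while its gen matches the gen of the bucket it points into.
 */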

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t pin;
	uint16_t prio;
	uint8_t gen;
	uint8_t disk_gen;
	uint8_t last_gc; /* Most out of date gen in the btree */
	uint8_t gc_gen;
	uint16_t gc_mark;
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	0
#define GC_MARK_DIRTY		1
#define GC_MARK_METADATA	2
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
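
/*
 * BITMASK() generates GC_MARK()/SET_GC_MARK() and GC_SECTORS_USED()/
 * SET_GC_SECTORS_USED() accessors packing both fields into the 16 bit
 * gc_mark word above. An illustrative sketch of how a caller might test a
 * bucket (example_* names are hypothetical, not part of bcache):
 */
static inline bool example_bucket_reclaimable(struct bucket *b)
{
	/* gc found neither dirty data nor metadata pointing into it */
	return GC_MARK(b) == GC_MARK_RECLAIMABLE;
}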

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node node;
	BKEY_PADDED(key);
	void *private;
};

struct keybuf {
	struct bkey last_scanned;
	spinlock_t lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey start;
	struct bkey end;

	struct rb_root keys;

#define KEYBUF_NR 100
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bio_split_pool {
	struct bio_set *bio_split;
	mempool_t *bio_split_hook;
};

struct bio_split_hook {
	struct closure cl;
	struct bio_split_pool *p;
	struct bio *bio;
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

struct bcache_device {
	struct closure cl;

	struct kobject kobj;

	struct cache_set *c;
	unsigned id;
#define BCACHEDEVNAME_SIZE 12
	char name[BCACHEDEVNAME_SIZE];

	struct gendisk *disk;

	/* If nonzero, we're closing */
	atomic_t closing;

	/* If nonzero, we're detaching/unregistering from cache set */
	atomic_t detaching;
	int flush_done;

	uint64_t nr_stripes;
	unsigned stripe_size;
	atomic_t *stripe_sectors_dirty;

	unsigned long sectors_dirty_last;
	long sectors_dirty_derivative;

	mempool_t *unaligned_bvec;
	struct bio_set *bio_split;

	unsigned data_csum:1;

	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl)(struct bcache_device *, fmode_t, unsigned, unsigned long);

	struct bio_split_pool bio_split_hook;
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node hash;
	struct list_head lru;

	unsigned long jiffies;
	unsigned sequential;
	sector_t last;
};

struct cached_dev {
	struct list_head list;
	struct bcache_device disk;
	struct block_device *bdev;

	struct cache_sb sb;
	struct bio sb_bio;
	struct bio_vec sb_bv[1];
	struct closure_with_waitlist sb_write;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	atomic_t count;
	struct work_struct detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a shared
	 * lock to set and an exclusive lock to clear.
	 */
	atomic_t has_dirty;

	struct bch_ratelimit writeback_rate;
	struct delayed_work writeback_rate_update;

	/*
	 * Internal to the writeback code, so read_dirty() can keep track of
	 * where it's at.
	 */
	sector_t last_read;

	/* Limit number of writeback bios in flight */
	struct semaphore in_flight;
	struct task_struct *writeback_thread;

	struct keybuf writeback_keys;

	/* For tracking sequential IO */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
	struct io io[RECENT_IO];
	struct hlist_head io_hash[RECENT_IO + 1];
	struct list_head io_lru;
	spinlock_t io_lock;

	struct cache_accounting accounting;

	/* The rest of this all shows up in sysfs */
	unsigned sequential_cutoff;
	unsigned readahead;

	unsigned sequential_merge:1;
	unsigned verify:1;

	unsigned partial_stripes_expensive:1;
	unsigned writeback_metadata:1;
	unsigned writeback_running:1;
	unsigned char writeback_percent;
	unsigned writeback_delay;

	int writeback_rate_change;
	int64_t writeback_rate_derivative;
	uint64_t writeback_rate_target;

	unsigned writeback_rate_update_seconds;
	unsigned writeback_rate_d_term;
	unsigned writeback_rate_p_term_inverse;
	unsigned writeback_rate_d_smooth;
};

enum alloc_watermarks {
	WATERMARK_PRIO,
	WATERMARK_METADATA,
	WATERMARK_MOVINGGC,
	WATERMARK_NONE,
	WATERMARK_MAX
};

struct cache {
	struct cache_set *set;
	struct cache_sb sb;
	struct bio sb_bio;
	struct bio_vec sb_bv[1];

	struct kobject kobj;
	struct block_device *bdev;

	unsigned watermark[WATERMARK_MAX];

	struct task_struct *alloc_thread;

	struct closure prio;
	struct prio_set *disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_buckets[] contains the buckets allocated for the next
	 * prio write, and prio_last_buckets[] contains the last buckets we
	 * wrote priorities to (so gc can mark them as metadata).
	 */
	uint64_t *prio_buckets;
	uint64_t *prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 *
	 * unused: GC found nothing pointing into these buckets (possibly
	 * because all the data they contained was overwritten), so we only
	 * need to discard them before they can be moved to the free list.
	 */
	DECLARE_FIFO(long, free);
	DECLARE_FIFO(long, free_inc);
	DECLARE_FIFO(long, unused);

	size_t fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket *buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * max(gen - disk_gen) for all buckets. When it gets too big we have to
	 * call prio_write() to keep gens from wrapping.
	 */
	uint8_t need_save_prio;
	unsigned gc_move_threshold;

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned invalidate_needs_gc:1;

	bool discard; /* Get rid of? */

	struct journal_device journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT 20
	atomic_t io_errors;
	atomic_t io_count;

	atomic_long_t meta_sectors_written;
	atomic_long_t btree_sectors_written;
	atomic_long_t sectors_written;

	struct bio_split_pool bio_split_hook;
};

struct gc_stat {
	size_t nodes;
	size_t key_bytes;

	size_t nkeys;
	uint64_t data; /* sectors */
	uint64_t dirty; /* sectors */
	unsigned in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 */
#define CACHE_SET_UNREGISTERING	0
#define CACHE_SET_STOPPING	1

struct cache_set {
	struct closure cl;

	struct list_head list;
	struct kobject kobj;
	struct kobject internal;
	struct dentry *debug;
	struct cache_accounting accounting;

	unsigned long flags;

	struct cache_sb sb;

	struct cache *cache[MAX_CACHES_PER_SET];
	struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
	int caches_loaded;

	struct bcache_device **devices;
	struct list_head cached_devs;
	uint64_t cached_dev_sectors;
	struct closure caching;

	struct closure_with_waitlist sb_write;

	mempool_t *search;
	mempool_t *bio_meta;
	struct bio_set *bio_split;

	/* For the btree cache */
	struct shrinker shrink;

	/* For the btree cache and anything allocation related */
	struct mutex bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for an actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head btree_cache;
	struct list_head btree_cache_freeable;
	struct list_head btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned bucket_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation. However, only one thread can be doing this
	 * at a time, for obvious reasons - try_harder and try_wait are
	 * basically a lock for this that we can wait on asynchronously. The
	 * btree_root() macro releases the lock when it returns.
	 */
	struct task_struct *try_harder;
	wait_queue_head_t try_wait;
	uint64_t try_harder_start;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we've
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but, for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t prio_blocked;
	wait_queue_head_t bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t rescale;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t min_prio;

	/*
	 * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
	 * to keep gens from wrapping around.
	 */
	uint8_t need_gc;
	struct gc_stat gc_stats;
	size_t nbuckets;

	struct task_struct *gc_thread;
	/* Where in the btree gc currently is */
	struct bkey gc_done;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t sectors_to_gc;

	wait_queue_head_t moving_gc_wait;
	struct keybuf moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore moving_in_flight;

	struct btree *root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree *verify_data;
	struct mutex verify_lock;
#endif

	unsigned nr_uuids;
	struct uuid_entry *uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure_with_waitlist uuid_write;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
	mempool_t *fill_iter;

	/*
	 * btree_sort() is a merge sort and requires temporary space - single
	 * element mempool
	 */
	struct mutex sort_lock;
	struct bset *sort;
	unsigned sort_crit_factor;

	/* List of buckets we're currently writing data to */
	struct list_head data_buckets;
	spinlock_t data_bucket_lock;

	struct journal journal;

#define CONGESTED_MAX 1024
	unsigned congested_last_us;
	atomic_t congested;

	/* The rest of this all shows up in sysfs */
	unsigned congested_read_threshold_us;
	unsigned congested_write_threshold_us;

	spinlock_t sort_time_lock;
	struct time_stats sort_time;
	struct time_stats btree_gc_time;
	struct time_stats btree_split_time;
	spinlock_t btree_read_time_lock;
	struct time_stats btree_read_time;
	struct time_stats try_harder_time;

	atomic_long_t cache_read_races;
	atomic_long_t writeback_keys_done;
	atomic_long_t writeback_keys_failed;

	enum {
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	} on_error;
	unsigned error_limit;
	unsigned error_decay;

	unsigned short journal_delay_ms;
	unsigned verify:1;
	unsigned key_merging_disabled:1;
	unsigned gc_always_rewrite:1;
	unsigned shrinker_disabled:1;
	unsigned copy_gc_enabled:1;

#define BUCKET_HASH_BITS 12
	struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
};

static inline bool key_merging_disabled(struct cache_set *c)
{
#ifdef CONFIG_BCACHE_DEBUG
	return c->key_merging_disabled;
#else
	return false;
#endif
}

struct bbio {
	unsigned submit_time_us;
	union {
		struct bkey key;
		uint64_t _pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio bio;
};

static inline unsigned local_clock_us(void)
{
	return local_clock() >> 10;
}

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c) \
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i)		__set_bytes(i, i->keys)

#define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
#define set_blocks(i, c)	__set_blocks(i, (i)->keys, c)
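
/*
 * Worked example (illustrative numbers): with 4k blocks, i.e.
 * block_bytes(c) == 4096, a bset carrying 500 u64s of key data takes
 * sizeof(struct bset) + 500 * 8 = a little over 4000 bytes, so
 * __set_blocks(i, 500, c) rounds up to 1 block; 512 u64s would push the
 * total past 4096 bytes and spill into a second block.
 */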

#define node(i, j)		((struct bkey *) ((i)->d + (j)))
#define end(i)			node(i, (i)->keys)

#define index(i, b) \
	((size_t) (((void *) i - (void *) (b)->sets[0].data) / \
		   block_bytes(b->c)))

#define btree_data_space(b)	(PAGE_SIZE << (b)->page_order)

#define prios_per_bucket(c) \
	((bucket_bytes(c) - sizeof(struct prio_set)) / \
	 sizeof(struct bucket_disk))
#define prio_buckets(c) \
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
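
/*
 * Worked example (illustrative): with 1MB buckets - 2048 512-byte sectors,
 * so bucket_bits == 11 - sector 6200 maps as
 *
 *	sector_to_bucket(c, 6200) == 3		(6200 >> 11)
 *	bucket_to_sector(c, 3) == 6144		(3 << 11)
 *	bucket_remainder(c, 6200) == 56		(6200 & 2047)
 */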

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
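
/*
 * Illustrative sketch (example_* is hypothetical; the real staleness
 * checks live with the btree code): a pointer is stale once its embedded
 * gen no longer matches the gen of the bucket it points into, which is
 * how incrementing a bucket's gen invalidates every key pointing at it.
 */
static inline bool example_ptr_stale(struct cache_set *c,
				     const struct bkey *k, unsigned ptr)
{
	return PTR_BUCKET(c, k, ptr)->gen != PTR_GEN(k, ptr);
}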

/* Btree key macros */

static inline void bkey_init(struct bkey *k)
{
	*k = ZERO_KEY;
}

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i) \
	bch_crc64(((void *) (i)) + sizeof(uint64_t), \
		  ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
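
/*
 * Illustrative use (example_* is hypothetical): since the checksum leads
 * each of those structs, verifying a bset read from disk is just:
 */
static inline bool example_csum_ok(struct bset *i)
{
	return i->csum == csum_set(i);
}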

/* Error handling macros */

#define btree_bug(b, ...) \
do { \
	if (bch_cache_set_error((b)->c, __VA_ARGS__)) \
		dump_stack(); \
} while (0)

#define cache_bug(c, ...) \
do { \
	if (bch_cache_set_error(c, __VA_ARGS__)) \
		dump_stack(); \
} while (0)

#define btree_bug_on(cond, b, ...) \
do { \
	if (cond) \
		btree_bug(b, __VA_ARGS__); \
} while (0)

#define cache_bug_on(cond, c, ...) \
do { \
	if (cond) \
		cache_bug(c, __VA_ARGS__); \
} while (0)

#define cache_set_err_on(cond, c, ...) \
do { \
	if (cond) \
		bch_cache_set_error(c, __VA_ARGS__); \
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter) \
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca) \
	for (b = (ca)->buckets + (ca)->sb.first_bucket; \
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
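
/*
 * Illustrative usage (example_* is hypothetical; a real caller would hold
 * bucket_lock): walking every bucket on every cache device in a set.
 */
static inline size_t example_count_metadata_buckets(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;
	size_t ret = 0;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (GC_MARK(b) == GC_MARK_METADATA)
				ret++;

	return ret;
}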

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (atomic_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!atomic_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic_inc();
	return true;
}
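
/*
 * Illustrative pattern (hypothetical caller): I/O done on behalf of a
 * backing device is bracketed by cached_dev_get()/cached_dev_put(), so
 * a detach can only complete once the last in-flight reference drops.
 */
static inline bool example_cached_io(struct cached_dev *dc)
{
	if (!cached_dev_get(dc))
		return false; /* detached - caller falls back to passthrough */

	/* ... submit the cached I/O here ... */

	cached_dev_put(dc);
	return true;
}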

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 *
 * bucket_disk_gen() returns the difference between the current gen and the gen
 * on disk; they're both used to make sure gens don't wrap around.
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

static inline uint8_t bucket_disk_gen(struct bucket *b)
{
	return b->gen - b->disk_gen;
}

#define BUCKET_GC_GEN_MAX	96U
#define BUCKET_DISK_GEN_MAX	64U
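
/*
 * Illustrative sketch (example_* is hypothetical; mirrors the check the
 * allocator has to make): a bucket's gen may only run so far ahead of
 * last_gc and disk_gen before gc or prio_write() must intervene - this is
 * what keeps the 8 bit gens from wrapping.
 */
static inline bool example_can_inc_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
	       bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}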

#define kobj_attribute_write(n, fn) \
	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store) \
	static struct kobj_attribute ksysfs_##n = \
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

/* Forward declarations */

void bch_count_io_errors(struct cache *, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
			      int, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
void bch_bucket_free(struct cache_set *, struct bkey *);

int __bch_bucket_alloc_set(struct cache_set *, unsigned,
			   struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
			 struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
		       unsigned, unsigned, bool);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);

int bch_cache_allocator_start(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);

void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */