Commit | Line | Data |
---|---|---|
cafe5635 KO |
1 | #ifndef _BCACHE_BSET_H |
2 | #define _BCACHE_BSET_H | |
3 | ||
c37511b8 KO |
4 | #include <linux/slab.h> |
5 | ||
cafe5635 KO |
6 | /* |
7 | * BKEYS: | |
8 | * | |
9 | * A bkey contains a key, a size field, a variable number of pointers, and some | |
10 | * ancillary flag bits. | |
11 | * | |
12 | * We use two different functions for validating bkeys, bch_ptr_invalid() and | |
13 | * bch_ptr_bad(). | |
14 | * | |
15 | * bch_ptr_invalid() primarily filters out keys and pointers that would be | |
16 | * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and | |
17 | * pointers that occur in normal practice but don't point to real data. | |
18 | * | |
19 | * The one exception to the rule that ptr_invalid() filters out invalid keys is | |
20 | * that it also filters out keys of size 0 - these are keys that have been | |
21 | * completely overwritten. It'd be safe to delete these in memory while leaving | |
22 | * them on disk, just unnecessary work - so we filter them out when resorting | |
23 | * instead. | |
24 | * | |
25 | * We can't filter out stale keys when we're resorting, because garbage | |
26 | * collection needs to find them to ensure bucket gens don't wrap around - | |
27 | * unless we're rewriting the btree node, those stale keys still exist on disk. | |
28 | * | |
29 | * We also implement functions here for removing some number of sectors from the | |
30 | * front or the back of a bkey - this is mainly used for fixing overlapping | |
31 | * extents, by removing the overlapping sectors from the older key. | |
32 | * | |
33 | * BSETS: | |
34 | * | |
35 | * A bset is an array of bkeys laid out contiguously in memory in sorted order, | |
36 | * along with a header. A btree node is made up of a number of these, written at | |
37 | * different times. | |
38 | * | |
39 | * There could be many of them on disk, but we never allow there to be more than | |
40 | * 4 in memory - we lazily resort as needed. | |
41 | * | |
42 | * We implement code here for creating and maintaining auxiliary search trees | |
43 | * (described below) for searching an individual bset, and on top of that we | |
44 | * implement a btree iterator. | |
45 | * | |
46 | * BTREE ITERATOR: | |
47 | * | |
48 | * Most of the code in bcache doesn't care about an individual bset - it needs | |
49 | * to search entire btree nodes and iterate over them in sorted order. | |
50 | * | |
51 | * The btree iterator code serves both functions; it iterates through the keys | |
52 | * in a btree node in sorted order, starting from either keys after a specific | |
53 | * point (if you pass it a search key) or the start of the btree node. | |
54 | * | |
55 | * AUXILIARY SEARCH TREES: | |
56 | * | |
57 | * Since keys are variable length, we can't use a binary search on a bset - we | |
58 | * wouldn't be able to find the start of the next key. But binary searches are | |
59 | * slow anyway, due to terrible cache behaviour; bcache originally used binary | |
60 | * searches and that code topped out at under 50k lookups/second. | |
61 | * | |
62 | * So we need to construct some sort of lookup table. Since we only insert keys | |
63 | * into the last (unwritten) set, most of the keys within a given btree node are | |
64 | * usually in sets that are mostly constant. We use two different types of | |
65 | * lookup tables to take advantage of this. | |
66 | * | |
67 | * Both lookup tables have in common that they don't index every key in the | |
68 | * set; they index one key every BSET_CACHELINE bytes, and then a linear search | |
69 | * is used for the rest. | |
70 | * | |
71 | * For sets that have been written to disk and are no longer being inserted | |
72 | * into, we construct a binary search tree in an array - traversing a binary | |
73 | * search tree in an array gives excellent locality of reference and is very | |
74 | * fast, since both children of any node are adjacent to each other in memory | |
75 | * (and their grandchildren, and great grandchildren...) - this means | |
76 | * prefetching can be used to great effect. | |
77 | * | |
78 | * It's quite useful, performance-wise, to keep these nodes small - not just | |
79 | * because they're more likely to be in L2, but also because we can prefetch | |
80 | * more nodes on a single cacheline and thus prefetch more iterations in advance | |
81 | * when traversing this tree. | |
82 | * | |
83 | * Nodes in the auxiliary search tree must contain both a key to compare against | |
84 | * (we don't want to fetch the key from the set, that would defeat the purpose), | |
85 | * and a pointer to the key. We use a few tricks to compress both of these. | |
86 | * | |
87 | * To compress the pointer, we take advantage of the fact that one node in the | |
88 | * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have | |
89 | * a function (to_inorder()) that takes the index of a node in a binary tree and | |
90 | * returns what its index would be in an inorder traversal, so we only have to | |
91 | * store the low bits of the offset. | |
92 | * | |
93 | * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To | |
94 | * compress that, we take advantage of the fact that when we're traversing the | |
95 | * search tree at every iteration we know that both our search key and the key | |
96 | * we're looking for lie within some range - bounded by our previous | |
97 | * comparisons. (We special case the start of a search so that this is true even | |
98 | * at the root of the tree). | |
99 | * | |
100 | * So if we know the key we're looking for is between a and b, and a and b don't | |
101 | * differ higher than bit 50, we don't need to check anything higher than bit | |
102 | * 50. | |
103 | * | |
104 | * We don't usually need the rest of the bits, either; we only need enough bits | |
105 | * to partition the key range we're currently checking. Consider key n - the | |
106 | * key our auxiliary search tree node corresponds to, and key p, the key | |
107 | * immediately preceding n. The lowest bit we need to store in the auxiliary | |
108 | * search tree is the highest bit that differs between n and p. | |
109 | * | |
110 | * Note that this could be bit 0 - we might sometimes need all 80 bits to do the | |
111 | * comparison. But we'd really like our nodes in the auxiliary search tree to be | |
112 | * of fixed size. | |
113 | * | |
114 | * The solution is to make them fixed size, and when we're constructing a node | |
115 | * check whether p and n differ in the bits we need them to. If they don't, we | |
116 | * flag that node, and when doing lookups we fall back to comparing against the | |
117 | * real key. As long as this doesn't happen too often (and it seems to reliably | |
118 | * happen a bit less than 1% of the time), we win - even on failures, that key | |
119 | * is then more likely to be in cache than if we were doing binary searches all | |
120 | * the way, since we're touching so much less memory. | |
121 | * | |
122 | * The keys in the auxiliary search tree are stored in (software) floating | |
123 | * point, with an exponent and a mantissa. The exponent needs to be big enough | |
124 | * to address all the bits in the original key, but the number of bits in the | |
125 | * mantissa is somewhat arbitrary; more bits just gets us fewer failures. | |
126 | * | |
127 | * We need 7 bits for the exponent and 3 bits for the key's offset (since keys | |
128 | * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes. | |
129 | * We need one node per 128 bytes in the btree node, which means the auxiliary | |
130 | * search trees take up 3% as much memory as the btree itself. | |
131 | * | |
132 | * Constructing these auxiliary search trees is moderately expensive, and we | |
133 | * don't want to be constantly rebuilding the search tree for the last set | |
134 | * whenever we insert another key into it. For the unwritten set, we use a much | |
135 | * simpler lookup table - it's just a flat array, so index i in the lookup table | |
136 | * corresponds to the i-th range of BSET_CACHELINE bytes in the set. Indexing | |
137 | * within each byte range works the same as with the auxiliary search trees. | |
138 | * | |
139 | * These are much easier to keep up to date when we insert a key - we do it | |
140 | * somewhat lazily; when we shift a key up we usually just increment the pointer | |
141 | * to it; only when it would overflow do we go to the trouble of finding the | |
142 | * first key in that range of bytes again. | |
143 | */ | |
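To make the binary-tree-in-an-array layout described above concrete, here is a minimal, hypothetical sketch of descending such a tree. It is not the real lookup code from bset.c: `go_right` stands in for the actual mantissa comparison, `prefetch()` comes from linux/prefetch.h, and struct bkey_float is defined further down in this header.

```c
/* Hypothetical sketch - not the real search code from bset.c. */
/* Needs <linux/prefetch.h> for prefetch(). */
static unsigned example_tree_descend(const struct bkey_float *tree,
				     unsigned size,
				     bool (*go_right)(const struct bkey_float *))
{
	unsigned j = 1;				/* the array is treated as 1-indexed */

	while (j < size) {
		prefetch(&tree[j << 4]);	/* descendants a few levels down are contiguous */
		j = j * 2 + go_right(&tree[j]);	/* left child at 2*j, right child at 2*j + 1 */
	}

	return j;
}
```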
144 | ||
145 | /* Btree key comparison/iteration */ | |
146 | ||
c37511b8 KO |
147 | #define MAX_BSETS 4U |
148 | ||
cafe5635 KO |
149 | struct btree_iter { |
150 | size_t size, used; | |
151 | struct btree_iter_set { | |
152 | struct bkey *k, *end; | |
153 | } data[MAX_BSETS]; | |
154 | }; | |
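As a rough illustration of how this iterator is meant to be used (a hedged sketch, not code from bcache): it assumes the iterator has already been set up against a btree node's sorted sets, e.g. via __bch_btree_iter_init() declared later in this header, and it only uses bch_btree_iter_next() and bkey_bytes() from this file.

```c
/* Sketch: walk every key of a node in sorted order, merged across its bsets. */
static size_t example_node_key_bytes(struct btree_iter *iter)
{
	struct bkey *k;
	size_t bytes = 0;

	while ((k = bch_btree_iter_next(iter)))
		bytes += bkey_bytes(k);		/* keys arrive in sorted order */

	return bytes;
}
```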
155 | ||
156 | struct bset_tree { | |
157 | /* | |
158 | * We construct a binary tree in an array as if the array | |
159 | * started at 1, so that things line up on the same cachelines | |
160 | * better: see comments in bset.c at cacheline_to_bkey() for | |
161 | * details | |
162 | */ | |
163 | ||
164 | /* size of the binary tree and prev array */ | |
165 | unsigned size; | |
166 | ||
167 | /* function of size - precalculated for to_inorder() */ | |
168 | unsigned extra; | |
169 | ||
170 | /* copy of the last key in the set */ | |
171 | struct bkey end; | |
172 | struct bkey_float *tree; | |
173 | ||
174 | /* | |
175 | * The nodes in the bset tree point to specific keys - this | |
176 | * array holds the size of the key preceding each of those keys. | |
177 | * | |
178 | * Conceptually it's a member of struct bkey_float, but we want | |
179 | * to keep bkey_float to 4 bytes and prev isn't used in the fast | |
180 | * path. | |
181 | */ | |
182 | uint8_t *prev; | |
183 | ||
184 | /* The actual btree node, with pointers to each sorted set */ | |
185 | struct bset *data; | |
186 | }; | |
187 | ||
188 | static __always_inline int64_t bkey_cmp(const struct bkey *l, | |
189 | const struct bkey *r) | |
190 | { | |
191 | return unlikely(KEY_INODE(l) != KEY_INODE(r)) | |
192 | ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r) | |
193 | : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); | |
194 | } | |
195 | ||
196 | static inline size_t bkey_u64s(const struct bkey *k) | |
197 | { | |
198 | BUG_ON(KEY_CSUM(k) > 1); | |
199 | return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0); | |
200 | } | |
201 | ||
202 | static inline size_t bkey_bytes(const struct bkey *k) | |
203 | { | |
204 | return bkey_u64s(k) * sizeof(uint64_t); | |
205 | } | |
206 | ||
207 | static inline void bkey_copy(struct bkey *dest, const struct bkey *src) | |
208 | { | |
209 | memcpy(dest, src, bkey_bytes(src)); | |
210 | } | |
211 | ||
212 | static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src) | |
213 | { | |
214 | if (!src) | |
215 | src = &KEY(0, 0, 0); | |
216 | ||
217 | SET_KEY_INODE(dest, KEY_INODE(src)); | |
218 | SET_KEY_OFFSET(dest, KEY_OFFSET(src)); | |
219 | } | |
220 | ||
221 | static inline struct bkey *bkey_next(const struct bkey *k) | |
222 | { | |
223 | uint64_t *d = (void *) k; | |
224 | return (struct bkey *) (d + bkey_u64s(k)); | |
225 | } | |
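For illustration, walking the keys of a single bset with bkey_next() looks roughly like the sketch below. This is an assumption-laden example, not bcache code: it presumes struct bset (defined in bcache.h) exposes a `keys` count of 64-bit words and the `start`/`d` union covering its key area.

```c
/* Sketch: linear walk over one bset's variable-length keys. */
static inline struct bkey *example_bset_end(struct bset *i)
{
	return (struct bkey *) &i->d[i->keys];	/* one past the last key */
}

static inline size_t example_count_keys(struct bset *i)
{
	struct bkey *k;
	size_t n = 0;

	for (k = i->start; k < example_bset_end(i); k = bkey_next(k))
		n++;				/* each k is a variable-length key */

	return n;
}
```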
226 | ||
227 | /* Keylists */ | |
228 | ||
229 | struct keylist { | |
230 | struct bkey *top; | |
231 | union { | |
232 | uint64_t *list; | |
233 | struct bkey *bottom; | |
234 | }; | |
235 | ||
236 | /* Enough room for btree_split's keys without realloc */ | |
237 | #define KEYLIST_INLINE 16 | |
238 | uint64_t d[KEYLIST_INLINE]; | |
239 | }; | |
240 | ||
241 | static inline void bch_keylist_init(struct keylist *l) | |
242 | { | |
243 | l->top = (void *) (l->list = l->d); | |
244 | } | |
245 | ||
246 | static inline void bch_keylist_push(struct keylist *l) | |
247 | { | |
248 | l->top = bkey_next(l->top); | |
249 | } | |
250 | ||
251 | static inline void bch_keylist_add(struct keylist *l, struct bkey *k) | |
252 | { | |
253 | bkey_copy(l->top, k); | |
254 | bch_keylist_push(l); | |
255 | } | |
256 | ||
257 | static inline bool bch_keylist_empty(struct keylist *l) | |
258 | { | |
259 | return l->top == (void *) l->list; | |
260 | } | |
261 | ||
262 | static inline void bch_keylist_free(struct keylist *l) | |
263 | { | |
264 | if (l->list != l->d) | |
265 | kfree(l->list); | |
266 | } | |
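Putting the keylist helpers together, a typical lifecycle might look like this hypothetical sketch. It only uses functions declared in this header; adding more than KEYLIST_INLINE worth of keys would first require bch_keylist_realloc(), declared below.

```c
/* Sketch: build up a small keylist, drain it, then free any allocation. */
static void example_keylist(struct bkey *k)
{
	struct keylist keys;

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, k);		/* copies *k onto the list */

	while (!bch_keylist_empty(&keys))
		bch_keylist_pop(&keys);		/* pops keys back off the top */

	bch_keylist_free(&keys);		/* no-op unless the list was realloc'd */
}
```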
267 | ||
268 | void bch_keylist_copy(struct keylist *, struct keylist *); | |
269 | struct bkey *bch_keylist_pop(struct keylist *); | |
270 | int bch_keylist_realloc(struct keylist *, int, struct cache_set *); | |
271 | ||
272 | void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, | |
273 | unsigned); | |
274 | bool __bch_cut_front(const struct bkey *, struct bkey *); | |
275 | bool __bch_cut_back(const struct bkey *, struct bkey *); | |
276 | ||
277 | static inline bool bch_cut_front(const struct bkey *where, struct bkey *k) | |
278 | { | |
279 | BUG_ON(bkey_cmp(where, k) > 0); | |
280 | return __bch_cut_front(where, k); | |
281 | } | |
282 | ||
283 | static inline bool bch_cut_back(const struct bkey *where, struct bkey *k) | |
284 | { | |
285 | BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0); | |
286 | return __bch_cut_back(where, k); | |
287 | } | |
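These cut helpers are what the comment at the top of the file means by removing the overlapping sectors from the older key. The sketch below is hypothetical and only handles the simplest case - the tail of an older extent overlapping the head of a newer one; the real insert path also deals with containment and splitting. It assumes START_KEY() from bcache.h and that extent bkeys compare by their end offset.

```c
/* Sketch: trim an older extent so it ends where a newer one begins. */
static void example_trim_older_key(struct bkey *older, const struct bkey *newer)
{
	/*
	 * Only the case where the tail of 'older' overlaps the head of
	 * 'newer'; the condition also guarantees bch_cut_back()'s BUG_ON
	 * cannot fire, since START_KEY(newer) >= START_KEY(older).
	 */
	if (bkey_cmp(&START_KEY(older), &START_KEY(newer)) <= 0 &&
	    bkey_cmp(older, &START_KEY(newer)) > 0)
		bch_cut_back(&START_KEY(newer), older);
}
```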
288 | ||
289 | const char *bch_ptr_status(struct cache_set *, const struct bkey *); | |
290 | bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *); | |
291 | bool bch_ptr_bad(struct btree *, const struct bkey *); | |
292 | ||
293 | static inline uint8_t gen_after(uint8_t a, uint8_t b) | |
294 | { | |
295 | uint8_t r = a - b; | |
296 | return r > 128U ? 0 : r; | |
297 | } | |
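Since bucket generations are 8-bit counters that wrap, gen_after() works modulo 256 and clamps "a is behind b" to 0. Two illustrative values (worked out here, not taken from the source):

```c
/*
 * gen_after(3, 250): (uint8_t)(3 - 250) == 9   -> returns 9 (a is 9 gens ahead of b)
 * gen_after(250, 3): (uint8_t)(250 - 3) == 247 -> 247 > 128, returns 0 (a is behind b)
 */
```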
298 | ||
299 | static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, | |
300 | unsigned i) | |
301 | { | |
302 | return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i)); | |
303 | } | |
304 | ||
305 | static inline bool ptr_available(struct cache_set *c, const struct bkey *k, | |
306 | unsigned i) | |
307 | { | |
308 | return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i); | |
309 | } | |
310 | ||
311 | ||
312 | typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *); | |
313 | ||
314 | struct bkey *bch_next_recurse_key(struct btree *, struct bkey *); | |
315 | struct bkey *bch_btree_iter_next(struct btree_iter *); | |
316 | struct bkey *bch_btree_iter_next_filter(struct btree_iter *, | |
317 | struct btree *, ptr_filter_fn); | |
318 | ||
319 | void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *); | |
320 | struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *, | |
321 | struct bkey *, struct bset_tree *); | |
322 | ||
323 | /* 32 bits total: */ | |
324 | #define BKEY_MID_BITS 3 | |
325 | #define BKEY_EXPONENT_BITS 7 | |
326 | #define BKEY_MANTISSA_BITS 22 | |
327 | #define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1) | |
328 | ||
329 | struct bkey_float { | |
330 | unsigned exponent:BKEY_EXPONENT_BITS; | |
331 | unsigned m:BKEY_MID_BITS; | |
332 | unsigned mantissa:BKEY_MANTISSA_BITS; | |
333 | } __packed; | |
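As an illustrative size check on the struct above (not from the source; BUILD_BUG_ON() comes from linux/bug.h): the three bitfields sum to 3 + 7 + 22 = 32 bits, so each bkey_float is 4 bytes, and at one node per BSET_CACHELINE (128) bytes of bset that is roughly the 3% overhead quoted in the comment at the top of this file.

```c
/* Illustrative compile-time check, assuming BUILD_BUG_ON() from <linux/bug.h>. */
static inline void example_bkey_float_size_check(void)
{
	BUILD_BUG_ON(BKEY_MID_BITS + BKEY_EXPONENT_BITS + BKEY_MANTISSA_BITS != 32);
	BUILD_BUG_ON(sizeof(struct bkey_float) != sizeof(uint32_t));
}
```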
334 | ||
335 | /* | |
336 | * BSET_CACHELINE was originally intended to match the hardware cacheline size - | |
337 | * it used to be 64, but I realized the lookup code would touch slightly less | |
338 | * memory if it was 128. | |
339 | * | |
340 | * It defines the number of bytes (in struct bset) per struct bkey_float in | |
341 | * the auxiliary search tree - when we're done searching the bkey_float tree we | |
342 | * have this many bytes left that we do a linear search over. | |
343 | * | |
344 | * Since (after level 5) every level of the bset_tree is on a new cacheline, | |
345 | * we're touching one fewer cacheline in the bset tree in exchange for one more | |
346 | * cacheline in the linear search - but the linear search might stop before it | |
347 | * gets to the second cacheline. | |
348 | */ | |
349 | ||
350 | #define BSET_CACHELINE 128 | |
351 | #define bset_tree_space(b) (btree_data_space(b) / BSET_CACHELINE) | |
352 | ||
353 | #define bset_tree_bytes(b) (bset_tree_space(b) * sizeof(struct bkey_float)) | |
354 | #define bset_prev_bytes(b) (bset_tree_space(b) * sizeof(uint8_t)) | |
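To put rough numbers on these macros (a hypothetical example - btree_data_space() depends on the actual bucket size): with a 16 KiB btree node there would be 16384 / 128 = 128 auxiliary tree entries, i.e. 512 bytes of bkey_floats plus 128 bytes for the prev array.

```c
/* Hypothetical 16 KiB btree node:                                */
/*   bset_tree_space(b) = 16384 / 128                     = 128   */
/*   bset_tree_bytes(b) = 128 * sizeof(struct bkey_float) = 512 B */
/*   bset_prev_bytes(b) = 128 * sizeof(uint8_t)           = 128 B */
```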
355 | ||
356 | void bch_bset_init_next(struct btree *); | |
357 | ||
358 | void bch_bset_fix_invalidated_key(struct btree *, struct bkey *); | |
359 | void bch_bset_fix_lookup_table(struct btree *, struct bkey *); | |
360 | ||
361 | struct bkey *__bch_bset_search(struct btree *, struct bset_tree *, | |
362 | const struct bkey *); | |
363 | ||
364 | static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t, | |
365 | const struct bkey *search) | |
366 | { | |
367 | return search ? __bch_bset_search(b, t, search) : t->data->start; | |
368 | } | |
369 | ||
370 | bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *); | |
371 | void bch_btree_sort_lazy(struct btree *); | |
372 | void bch_btree_sort_into(struct btree *, struct btree *); | |
373 | void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *); | |
374 | void bch_btree_sort_partial(struct btree *, unsigned); | |
375 | ||
376 | static inline void bch_btree_sort(struct btree *b) | |
377 | { | |
378 | bch_btree_sort_partial(b, 0); | |
379 | } | |
380 | ||
381 | int bch_bset_print_stats(struct cache_set *, char *); | |
382 | ||
383 | #endif |