Commit | Line | Data |
---|---|---|
f2836352 JT |
1 | /* |
2 | * Copyright (C) 2012 Red Hat. All rights reserved. | |
3 | * | |
4 | * This file is released under the GPL. | |
5 | */ | |
6 | ||
7 | #include "dm-cache-policy.h" | |
8 | #include "dm.h" | |
9 | ||
10 | #include <linux/hash.h> | |
11 | #include <linux/module.h> | |
12 | #include <linux/mutex.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/vmalloc.h> | |
15 | ||
16 | #define DM_MSG_PREFIX "cache-policy-mq" | |
f2836352 JT |
17 | |
18 | static struct kmem_cache *mq_entry_cache; | |
19 | ||
20 | /*----------------------------------------------------------------*/ | |
21 | ||
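| /* Rounds max(n, min) up to the nearest power of two. */ | |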
22 | static unsigned next_power(unsigned n, unsigned min) | |
23 | { | |
24 | return roundup_pow_of_two(max(n, min)); | |
25 | } | |
26 | ||
27 | /*----------------------------------------------------------------*/ | |
28 | ||
29 | static unsigned long *alloc_bitset(unsigned nr_entries) | |
30 | { | |
31 | size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); | |
32 | return vzalloc(s); | |
33 | } | |
34 | ||
35 | static void free_bitset(unsigned long *bits) | |
36 | { | |
37 | vfree(bits); | |
38 | } | |
39 | ||
40 | /*----------------------------------------------------------------*/ | |
41 | ||
42 | /* | |
43 | * Large, sequential ios are probably better left on the origin device since | |
44 | * spindles tend to have good bandwidth. | |
45 | * | |
46 | * The io_tracker tries to spot when the io is in one of these sequential | |
47 | * modes. | |
48 | * | |
49 | * The two thresholds used to switch between random and sequential io modes | |
50 | * default as follows and can be adjusted via the constructor and message interfaces. | |
51 | */ | |
52 | #define RANDOM_THRESHOLD_DEFAULT 4 | |
53 | #define SEQUENTIAL_THRESHOLD_DEFAULT 512 | |
54 | ||
55 | enum io_pattern { | |
56 | PATTERN_SEQUENTIAL, | |
57 | PATTERN_RANDOM | |
58 | }; | |
59 | ||
60 | struct io_tracker { | |
61 | enum io_pattern pattern; | |
62 | ||
63 | unsigned nr_seq_samples; | |
64 | unsigned nr_rand_samples; | |
65 | unsigned thresholds[2]; | |
66 | ||
67 | dm_oblock_t last_end_oblock; | |
68 | }; | |
69 | ||
70 | static void iot_init(struct io_tracker *t, | |
71 | int sequential_threshold, int random_threshold) | |
72 | { | |
73 | t->pattern = PATTERN_RANDOM; | |
74 | t->nr_seq_samples = 0; | |
75 | t->nr_rand_samples = 0; | |
76 | t->last_end_oblock = 0; | |
77 | t->thresholds[PATTERN_RANDOM] = random_threshold; | |
78 | t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold; | |
79 | } | |
80 | ||
81 | static enum io_pattern iot_pattern(struct io_tracker *t) | |
82 | { | |
83 | return t->pattern; | |
84 | } | |
85 | ||
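| /* | |
|  * Classifies this bio as a continuation of the previous io (sequential) or | |
|  * not (random).  Note that last_end_oblock actually holds the last sector | |
|  * seen, despite its dm_oblock_t type, so the comparison is in sectors. | |
|  */ | |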
86 | static void iot_update_stats(struct io_tracker *t, struct bio *bio) | |
87 | { | |
88 | if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) | |
89 | t->nr_seq_samples++; | |
90 | else { | |
91 | /* | |
92 | * Just one non-sequential IO is enough to reset the | |
93 | * counters. | |
94 | */ | |
95 | if (t->nr_seq_samples) { | |
96 | t->nr_seq_samples = 0; | |
97 | t->nr_rand_samples = 0; | |
98 | } | |
99 | ||
100 | t->nr_rand_samples++; | |
101 | } | |
102 | ||
103 | t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); | |
104 | } | |
105 | ||
106 | static void iot_check_for_pattern_switch(struct io_tracker *t) | |
107 | { | |
108 | switch (t->pattern) { | |
109 | case PATTERN_SEQUENTIAL: | |
110 | if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) { | |
111 | t->pattern = PATTERN_RANDOM; | |
112 | t->nr_seq_samples = t->nr_rand_samples = 0; | |
113 | } | |
114 | break; | |
115 | ||
116 | case PATTERN_RANDOM: | |
117 | if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) { | |
118 | t->pattern = PATTERN_SEQUENTIAL; | |
119 | t->nr_seq_samples = t->nr_rand_samples = 0; | |
120 | } | |
121 | break; | |
122 | } | |
123 | } | |
124 | ||
125 | static void iot_examine_bio(struct io_tracker *t, struct bio *bio) | |
126 | { | |
127 | iot_update_stats(t, bio); | |
128 | iot_check_for_pattern_switch(t); | |
129 | } | |
130 | ||
131 | /*----------------------------------------------------------------*/ | |
132 | ||
133 | ||
134 | /* | |
135 | * This queue is divided up into different levels, allowing us to push | |
136 | * entries to the back of any of the levels. Think of it as a partially | |
137 | * sorted queue. | |
138 | */ | |
139 | #define NR_QUEUE_LEVELS 16u | |
140 | ||
141 | struct queue { | |
142 | struct list_head qs[NR_QUEUE_LEVELS]; | |
143 | }; | |
144 | ||
145 | static void queue_init(struct queue *q) | |
146 | { | |
147 | unsigned i; | |
148 | ||
149 | for (i = 0; i < NR_QUEUE_LEVELS; i++) | |
150 | INIT_LIST_HEAD(q->qs + i); | |
151 | } | |
152 | ||
153 | /* | |
154 | * Insert an entry to the back of the given level. | |
155 | */ | |
156 | static void queue_push(struct queue *q, unsigned level, struct list_head *elt) | |
157 | { | |
158 | list_add_tail(elt, q->qs + level); | |
159 | } | |
160 | ||
161 | static void queue_remove(struct list_head *elt) | |
162 | { | |
163 | list_del(elt); | |
164 | } | |
165 | ||
166 | /* | |
167 | * Shifts all entries down one level.  This has no effect on the order of | |
168 | * the queue. | |
169 | */ | |
170 | static void queue_shift_down(struct queue *q) | |
171 | { | |
172 | unsigned level; | |
173 | ||
174 | for (level = 1; level < NR_QUEUE_LEVELS; level++) | |
175 | list_splice_init(q->qs + level, q->qs + level - 1); | |
176 | } | |
177 | ||
178 | /* | |
179 | * Gives us the oldest entry of the lowest populated level.  If the first | |
180 | * level is emptied then we shift down one level. | |
181 | */ | |
182 | static struct list_head *queue_pop(struct queue *q) | |
183 | { | |
184 | unsigned level; | |
185 | struct list_head *r; | |
186 | ||
187 | for (level = 0; level < NR_QUEUE_LEVELS; level++) | |
188 | if (!list_empty(q->qs + level)) { | |
189 | r = q->qs[level].next; | |
190 | list_del(r); | |
191 | ||
192 | /* have we just emptied the bottom level? */ | |
193 | if (level == 0 && list_empty(q->qs)) | |
194 | queue_shift_down(q); | |
195 | ||
196 | return r; | |
197 | } | |
198 | ||
199 | return NULL; | |
200 | } | |
201 | ||
202 | static struct list_head *list_pop(struct list_head *lh) | |
203 | { | |
204 | struct list_head *r = lh->next; | |
205 | ||
206 | BUG_ON(!r); | |
207 | list_del_init(r); | |
208 | ||
209 | return r; | |
210 | } | |
211 | ||
212 | /*----------------------------------------------------------------*/ | |
213 | ||
214 | /* | |
215 | * Describes a cache entry. Used in both the cache and the pre_cache. | |
216 | */ | |
217 | struct entry { | |
218 | struct hlist_node hlist; | |
219 | struct list_head list; | |
220 | dm_oblock_t oblock; | |
221 | dm_cblock_t cblock; /* valid iff in_cache */ | |
222 | ||
223 | /* | |
224 | * FIXME: pack these better | |
225 | */ | |
226 | bool in_cache:1; | |
01911c19 | 227 | bool dirty:1; |
f2836352 JT |
228 | unsigned hit_count; |
229 | unsigned generation; | |
230 | unsigned tick; | |
231 | }; | |
232 | ||
233 | struct mq_policy { | |
234 | struct dm_cache_policy policy; | |
235 | ||
236 | /* protects everything */ | |
237 | struct mutex lock; | |
238 | dm_cblock_t cache_size; | |
239 | struct io_tracker tracker; | |
240 | ||
241 | /* | |
01911c19 JT |
242 | * We maintain three queues of entries. The cache proper, |
243 | * consisting of a clean and dirty queue, contains the currently | |
244 | * active mappings. Whereas the pre_cache tracks blocks that | |
245 | * are being hit frequently and potential candidates for promotion | |
246 | * to the cache. | |
f2836352 JT |
247 | */ |
248 | struct queue pre_cache; | |
01911c19 JT |
249 | struct queue cache_clean; |
250 | struct queue cache_dirty; | |
f2836352 JT |
251 | |
252 | /* | |
253 | * Keeps track of time, incremented by the core. We use this to | |
254 | * avoid attributing multiple hits within the same tick. | |
255 | * | |
256 | * Access to tick_protected should be done with the spin lock held. | |
257 | * It's copied to tick at the start of the map function (within the | |
258 | * mutex). | |
259 | */ | |
260 | spinlock_t tick_lock; | |
261 | unsigned tick_protected; | |
262 | unsigned tick; | |
263 | ||
264 | /* | |
265 | * A count of the number of times the map function has been called | |
266 | * and found an entry in the pre_cache or cache. Currently used to | |
267 | * calculate the generation. | |
268 | */ | |
269 | unsigned hit_count; | |
270 | ||
271 | /* | |
272 | * A generation is a longish period that is used to trigger some | |
273 | * bookkeeping effects, eg. decrementing hit counts on entries. | |
274 | * This is needed to allow the cache to evolve as io patterns | |
275 | * change. | |
276 | */ | |
277 | unsigned generation; | |
278 | unsigned generation_period; /* in lookups (will probably change) */ | |
279 | ||
280 | /* | |
281 | * Entries in the pre_cache whose hit count passes the promotion | |
282 | * threshold move to the cache proper. Working out the correct | |
283 | * value for promote_threshold is crucial to this policy. | |
284 | */ | |
285 | unsigned promote_threshold; | |
286 | ||
287 | /* | |
288 | * We need cache_size entries for the cache, and choose to have | |
289 | * cache_size entries for the pre_cache too. One motivation for | |
290 | * using the same size is to make the hit counts directly | |
291 | * comparable between pre_cache and cache. | |
292 | */ | |
293 | unsigned nr_entries; | |
294 | unsigned nr_entries_allocated; | |
295 | struct list_head free; | |
296 | ||
297 | /* | |
298 | * Cache blocks may be unallocated. We store this info in a | |
299 | * bitset. | |
300 | */ | |
301 | unsigned long *allocation_bitset; | |
302 | unsigned nr_cblocks_allocated; | |
303 | unsigned find_free_nr_words; | |
304 | unsigned find_free_last_word; | |
305 | ||
306 | /* | |
307 | * The hash table allows us to quickly find an entry by origin | |
308 | * block. Both pre_cache and cache entries are in here. | |
309 | */ | |
310 | unsigned nr_buckets; | |
311 | dm_block_t hash_bits; | |
312 | struct hlist_head *table; | |
313 | }; | |
314 | ||
315 | /*----------------------------------------------------------------*/ | |
316 | /* Free/alloc mq cache entry structures. */ | |
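| /* Splices every level of a queue onto the given list. */ | |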
0184b44e | 317 | static void concat_queue(struct list_head *lh, struct queue *q) |
f2836352 JT |
318 | { |
319 | unsigned level; | |
320 | ||
321 | for (level = 0; level < NR_QUEUE_LEVELS; level++) | |
322 | list_splice(q->qs + level, lh); | |
323 | } | |
324 | ||
325 | static void free_entries(struct mq_policy *mq) | |
326 | { | |
327 | struct entry *e, *tmp; | |
328 | ||
0184b44e | 329 | concat_queue(&mq->free, &mq->pre_cache); |
01911c19 JT |
330 | concat_queue(&mq->free, &mq->cache_clean); |
331 | concat_queue(&mq->free, &mq->cache_dirty); | |
f2836352 JT |
332 | |
333 | list_for_each_entry_safe(e, tmp, &mq->free, list) | |
334 | kmem_cache_free(mq_entry_cache, e); | |
335 | } | |
336 | ||
337 | static int alloc_entries(struct mq_policy *mq, unsigned elts) | |
338 | { | |
339 | unsigned u = mq->nr_entries; | |
340 | ||
341 | INIT_LIST_HEAD(&mq->free); | |
342 | mq->nr_entries_allocated = 0; | |
343 | ||
344 | while (u--) { | |
345 | struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL); | |
346 | ||
347 | if (!e) { | |
348 | free_entries(mq); | |
349 | return -ENOMEM; | |
350 | } | |
351 | ||
352 | ||
353 | list_add(&e->list, &mq->free); | |
354 | } | |
355 | ||
356 | return 0; | |
357 | } | |
358 | ||
359 | /*----------------------------------------------------------------*/ | |
360 | ||
361 | /* | |
362 | * Simple hash table implementation. Should replace with the standard hash | |
363 | * table that's making its way upstream. | |
364 | */ | |
365 | static void hash_insert(struct mq_policy *mq, struct entry *e) | |
366 | { | |
367 | unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits); | |
368 | ||
369 | hlist_add_head(&e->hlist, mq->table + h); | |
370 | } | |
371 | ||
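| /* | |
|  * Looks up an entry by origin block.  On a hit the entry is moved to the | |
|  * front of its bucket so hot entries are found quickly next time. | |
|  */ | |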
372 | static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) | |
373 | { | |
374 | unsigned h = hash_64(from_oblock(oblock), mq->hash_bits); | |
375 | struct hlist_head *bucket = mq->table + h; | |
376 | struct entry *e; | |
377 | ||
378 | hlist_for_each_entry(e, bucket, hlist) | |
379 | if (e->oblock == oblock) { | |
380 | hlist_del(&e->hlist); | |
381 | hlist_add_head(&e->hlist, bucket); | |
382 | return e; | |
383 | } | |
384 | ||
385 | return NULL; | |
386 | } | |
387 | ||
388 | static void hash_remove(struct entry *e) | |
389 | { | |
390 | hlist_del(&e->hlist); | |
391 | } | |
392 | ||
393 | /*----------------------------------------------------------------*/ | |
394 | ||
395 | /* | |
396 | * Allocates a new entry structure. The memory is allocated in one lump, | |
397 | * so we just hand it out here.  Returns NULL if all entries have | |
398 | * already been allocated. Cannot fail otherwise. | |
399 | */ | |
400 | static struct entry *alloc_entry(struct mq_policy *mq) | |
401 | { | |
402 | struct entry *e; | |
403 | ||
404 | if (mq->nr_entries_allocated >= mq->nr_entries) { | |
405 | BUG_ON(!list_empty(&mq->free)); | |
406 | return NULL; | |
407 | } | |
408 | ||
409 | e = list_entry(list_pop(&mq->free), struct entry, list); | |
410 | INIT_LIST_HEAD(&e->list); | |
411 | INIT_HLIST_NODE(&e->hlist); | |
412 | ||
413 | mq->nr_entries_allocated++; | |
414 | return e; | |
415 | } | |
416 | ||
417 | /*----------------------------------------------------------------*/ | |
418 | ||
419 | /* | |
420 | * Mark cache blocks allocated or not in the bitset. | |
421 | */ | |
422 | static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock) | |
423 | { | |
424 | BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size)); | |
425 | BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset)); | |
426 | ||
427 | set_bit(from_cblock(cblock), mq->allocation_bitset); | |
428 | mq->nr_cblocks_allocated++; | |
429 | } | |
430 | ||
431 | static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock) | |
432 | { | |
433 | BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size)); | |
434 | BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset)); | |
435 | ||
436 | clear_bit(from_cblock(cblock), mq->allocation_bitset); | |
437 | mq->nr_cblocks_allocated--; | |
438 | } | |
439 | ||
440 | static bool any_free_cblocks(struct mq_policy *mq) | |
441 | { | |
442 | return mq->nr_cblocks_allocated < from_cblock(mq->cache_size); | |
443 | } | |
444 | ||
445 | /* | |
446 | * Fills result out with a cache block that isn't in use, or returns | |
447 | * -ENOSPC.  This does _not_ mark the cblock as allocated; the caller is | |
448 | * responsible for that. | |
449 | */ | |
450 | static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end, | |
451 | dm_cblock_t *result, unsigned *last_word) | |
452 | { | |
453 | int r = -ENOSPC; | |
454 | unsigned w; | |
455 | ||
456 | for (w = begin; w < end; w++) { | |
457 | /* | |
458 | * ffz is undefined if no zero exists | |
459 | */ | |
460 | if (mq->allocation_bitset[w] != ~0UL) { | |
461 | *last_word = w; | |
462 | *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w])); | |
463 | if (from_cblock(*result) < from_cblock(mq->cache_size)) | |
464 | r = 0; | |
465 | ||
466 | break; | |
467 | } | |
468 | } | |
469 | ||
470 | return r; | |
471 | } | |
472 | ||
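| /* | |
|  * Starts searching from the word we last allocated from, wrapping back to | |
|  * the start of the bitset if nothing free is found above it. | |
|  */ | |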
473 | static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result) | |
474 | { | |
475 | int r; | |
476 | ||
477 | if (!any_free_cblocks(mq)) | |
478 | return -ENOSPC; | |
479 | ||
480 | r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word); | |
481 | if (r == -ENOSPC && mq->find_free_last_word) | |
482 | r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word); | |
483 | ||
484 | return r; | |
485 | } | |
486 | ||
487 | /*----------------------------------------------------------------*/ | |
488 | ||
489 | /* | |
490 | * Now we get to the meat of the policy. This section deals with deciding | |
491 | * when to add entries to the pre_cache and cache, and move between | |
492 | * them. | |
493 | */ | |
494 | ||
495 | /* | |
496 | * The queue level is based on the log2 of the hit count. | |
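|  * eg, a hit count of 1 maps to level 0, 2-3 to level 1, 4-7 to level 2, | |
|  * and anything >= 32768 saturates at level 15 (NR_QUEUE_LEVELS - 1). | |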
497 | */ | |
498 | static unsigned queue_level(struct entry *e) | |
499 | { | |
500 | return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u); | |
501 | } | |
502 | ||
503 | /* | |
504 | * Inserts the entry into the pre_cache or the cache. Ensures the cache | |
505 | * block is marked as allocated if necessary.  Inserts into the hash table.  Sets the | |
506 | * tick which records when the entry was last moved about. | |
507 | */ | |
508 | static void push(struct mq_policy *mq, struct entry *e) | |
509 | { | |
510 | e->tick = mq->tick; | |
511 | hash_insert(mq, e); | |
512 | ||
513 | if (e->in_cache) { | |
514 | alloc_cblock(mq, e->cblock); | |
01911c19 JT |
515 | queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean, |
516 | queue_level(e), &e->list); | |
f2836352 JT |
517 | } else |
518 | queue_push(&mq->pre_cache, queue_level(e), &e->list); | |
519 | } | |
520 | ||
521 | /* | |
522 | * Removes an entry from pre_cache or cache. Removes from the hash table. | |
523 | * Frees off the cache block if necessary. | |
524 | */ | |
525 | static void del(struct mq_policy *mq, struct entry *e) | |
526 | { | |
527 | queue_remove(&e->list); | |
528 | hash_remove(e); | |
529 | if (e->in_cache) | |
530 | free_cblock(mq, e->cblock); | |
531 | } | |
532 | ||
533 | /* | |
534 | * Like del, except it removes the first entry in the queue (ie. the least | |
535 | * recently used). | |
536 | */ | |
537 | static struct entry *pop(struct mq_policy *mq, struct queue *q) | |
538 | { | |
0184b44e JT |
539 | struct entry *e; |
540 | struct list_head *h = queue_pop(q); | |
f2836352 | 541 | |
0184b44e JT |
542 | if (!h) |
543 | return NULL; | |
f2836352 | 544 | |
0184b44e JT |
545 | e = container_of(h, struct entry, list); |
546 | hash_remove(e); | |
547 | if (e->in_cache) | |
548 | free_cblock(mq, e->cblock); | |
f2836352 JT |
549 | |
550 | return e; | |
551 | } | |
552 | ||
553 | /* | |
554 | * Has this entry already been updated? | |
555 | */ | |
556 | static bool updated_this_tick(struct mq_policy *mq, struct entry *e) | |
557 | { | |
558 | return mq->tick == e->tick; | |
559 | } | |
560 | ||
561 | /* | |
562 | * The promotion threshold is adjusted every generation. As are the counts | |
563 | * of the entries. | |
564 | * | |
565 | * At the moment the threshold is taken by averaging the hit counts of some | |
01911c19 JT |
566 | * of the entries in the cache (the first 20 entries across all levels in |
567 | * ascending order, giving preference to the clean entries at each level). | |
f2836352 JT |
568 | * |
569 | * We can be much cleverer than this though. For example, each promotion | |
570 | * could bump up the threshold helping to prevent churn. Much more to do | |
571 | * here. | |
572 | */ | |
573 | ||
574 | #define MAX_TO_AVERAGE 20 | |
575 | ||
576 | static void check_generation(struct mq_policy *mq) | |
577 | { | |
578 | unsigned total = 0, nr = 0, count = 0, level; | |
579 | struct list_head *head; | |
580 | struct entry *e; | |
581 | ||
582 | if ((mq->hit_count >= mq->generation_period) && | |
583 | (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) { | |
584 | ||
585 | mq->hit_count = 0; | |
586 | mq->generation++; | |
587 | ||
588 | for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) { | |
01911c19 JT |
589 | head = mq->cache_clean.qs + level; |
590 | list_for_each_entry(e, head, list) { | |
591 | nr++; | |
592 | total += e->hit_count; | |
593 | ||
594 | if (++count >= MAX_TO_AVERAGE) | |
595 | break; | |
596 | } | |
597 | ||
598 | head = mq->cache_dirty.qs + level; | |
f2836352 JT |
599 | list_for_each_entry(e, head, list) { |
600 | nr++; | |
601 | total += e->hit_count; | |
602 | ||
603 | if (++count >= MAX_TO_AVERAGE) | |
604 | break; | |
605 | } | |
606 | } | |
607 | ||
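| /* | |
|  * Set the new threshold to the mean hit count of the sampled | |
|  * entries, rounded up. | |
|  */ | |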
608 | mq->promote_threshold = nr ? total / nr : 1; | |
609 | if (mq->promote_threshold * nr < total) | |
610 | mq->promote_threshold++; | |
611 | } | |
612 | } | |
613 | ||
614 | /* | |
615 | * Whenever we use an entry we bump up its hit counter, and push it to the | |
616 | * back of its current level. | |
617 | */ | |
618 | static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e) | |
619 | { | |
620 | if (updated_this_tick(mq, e)) | |
621 | return; | |
622 | ||
623 | e->hit_count++; | |
624 | mq->hit_count++; | |
625 | check_generation(mq); | |
626 | ||
627 | /* generation adjustment, to stop the counts increasing forever. */ | |
628 | /* FIXME: divide? */ | |
629 | /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */ | |
630 | e->generation = mq->generation; | |
631 | ||
632 | del(mq, e); | |
633 | push(mq, e); | |
634 | } | |
635 | ||
636 | /* | |
637 | * Demote the least recently used entry from the cache to the pre_cache. | |
638 | * Returns the cache block that was freed up, and the old origin block it | |
639 | * was mapped to, via the output parameters. | |
640 | * | |
641 | * We drop the hit count on the demoted entry back to 1 to stop it bouncing | |
642 | * straight back into the cache if it's subsequently hit. There are | |
643 | * various options here, and more experimentation would be good: | |
644 | * | |
645 | * - just forget about the demoted entry completely (ie. don't insert it | |
646 | *   into the pre_cache). | |
647 | * - divide the hit count rather than setting it to some hard coded value. | |
648 | * - set the hit count to a hard coded value other than 1, eg, is it better | |
649 | * if it goes in at level 2? | |
650 | */ | |
01911c19 | 651 | static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t *cblock) |
f2836352 | 652 | { |
01911c19 | 653 | struct entry *demoted = pop(mq, &mq->cache_clean); |
f2836352 | 654 | |
01911c19 JT |
655 | if (!demoted) |
656 | /* | |
657 | * We could get a block from mq->cache_dirty, but that | |
658 | * would add extra latency to the triggering bio as it | |
659 | * waits for the writeback. Better to not promote this | |
660 | * time and hope there's a clean block next time this block | |
661 | * is hit. | |
662 | */ | |
663 | return -ENOSPC; | |
664 | ||
665 | *cblock = demoted->cblock; | |
f2836352 JT |
666 | *oblock = demoted->oblock; |
667 | demoted->in_cache = false; | |
01911c19 | 668 | demoted->dirty = false; |
f2836352 JT |
669 | demoted->hit_count = 1; |
670 | push(mq, demoted); | |
671 | ||
01911c19 | 672 | return 0; |
f2836352 JT |
673 | } |
674 | ||
675 | /* | |
676 | * We modify the basic promote_threshold depending on the specific io. | |
677 | * | |
678 | * If the origin block has been discarded then there's no cost to copy it | |
679 | * to the cache. | |
680 | * | |
681 | * We bias towards reads, since they can be demoted at no cost if they | |
682 | * haven't been dirtied. | |
683 | */ | |
684 | #define DISCARDED_PROMOTE_THRESHOLD 1 | |
685 | #define READ_PROMOTE_THRESHOLD 4 | |
686 | #define WRITE_PROMOTE_THRESHOLD 8 | |
687 | ||
688 | static unsigned adjusted_promote_threshold(struct mq_policy *mq, | |
689 | bool discarded_oblock, int data_dir) | |
690 | { | |
691 | if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE) | |
692 | /* | |
693 | * We don't need to do any copying at all, so give this a | |
694 | * very low threshold. In practice this only triggers | |
695 | * during initial population after a format. | |
696 | */ | |
697 | return DISCARDED_PROMOTE_THRESHOLD; | |
698 | ||
699 | return data_dir == READ ? | |
700 | (mq->promote_threshold + READ_PROMOTE_THRESHOLD) : | |
701 | (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD); | |
702 | } | |
703 | ||
704 | static bool should_promote(struct mq_policy *mq, struct entry *e, | |
705 | bool discarded_oblock, int data_dir) | |
706 | { | |
707 | return e->hit_count >= | |
708 | adjusted_promote_threshold(mq, discarded_oblock, data_dir); | |
709 | } | |
710 | ||
711 | static int cache_entry_found(struct mq_policy *mq, | |
712 | struct entry *e, | |
713 | struct policy_result *result) | |
714 | { | |
715 | requeue_and_update_tick(mq, e); | |
716 | ||
717 | if (e->in_cache) { | |
718 | result->op = POLICY_HIT; | |
719 | result->cblock = e->cblock; | |
720 | } | |
721 | ||
722 | return 0; | |
723 | } | |
724 | ||
725 | /* | |
0184b44e | 726 | * Moves an entry from the pre_cache to the cache. The main work is |
f2836352 JT |
727 | * finding which cache block to use. |
728 | */ | |
729 | static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e, | |
730 | struct policy_result *result) | |
731 | { | |
01911c19 | 732 | int r; |
f2836352 JT |
733 | dm_cblock_t cblock; |
734 | ||
735 | if (find_free_cblock(mq, &cblock) == -ENOSPC) { | |
736 | result->op = POLICY_REPLACE; | |
01911c19 JT |
737 | r = demote_cblock(mq, &result->old_oblock, &cblock); |
738 | if (r) { | |
739 | result->op = POLICY_MISS; | |
740 | return 0; | |
741 | } | |
f2836352 JT |
742 | } else |
743 | result->op = POLICY_NEW; | |
744 | ||
745 | result->cblock = e->cblock = cblock; | |
746 | ||
747 | del(mq, e); | |
748 | e->in_cache = true; | |
01911c19 | 749 | e->dirty = false; |
f2836352 JT |
750 | push(mq, e); |
751 | ||
752 | return 0; | |
753 | } | |
754 | ||
755 | static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, | |
756 | bool can_migrate, bool discarded_oblock, | |
757 | int data_dir, struct policy_result *result) | |
758 | { | |
759 | int r = 0; | |
760 | bool updated = updated_this_tick(mq, e); | |
761 | ||
762 | requeue_and_update_tick(mq, e); | |
763 | ||
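| /* | |
|  * If the block was already hit this tick (and wasn't discarded), or it | |
|  * hasn't accumulated enough hits to be promoted, leave it in the | |
|  * pre_cache and let the io be serviced from the origin. | |
|  */ | |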
764 | if ((!discarded_oblock && updated) || | |
765 | !should_promote(mq, e, discarded_oblock, data_dir)) | |
766 | result->op = POLICY_MISS; | |
767 | else if (!can_migrate) | |
768 | r = -EWOULDBLOCK; | |
769 | else | |
770 | r = pre_cache_to_cache(mq, e, result); | |
771 | ||
772 | return r; | |
773 | } | |
774 | ||
775 | static void insert_in_pre_cache(struct mq_policy *mq, | |
776 | dm_oblock_t oblock) | |
777 | { | |
778 | struct entry *e = alloc_entry(mq); | |
779 | ||
780 | if (!e) | |
781 | /* | |
782 | * There's no spare entry structure, so we grab the least | |
783 | * used one from the pre_cache. | |
784 | */ | |
785 | e = pop(mq, &mq->pre_cache); | |
786 | ||
787 | if (unlikely(!e)) { | |
788 | DMWARN("couldn't pop from pre cache"); | |
789 | return; | |
790 | } | |
791 | ||
792 | e->in_cache = false; | |
01911c19 | 793 | e->dirty = false; |
f2836352 JT |
794 | e->oblock = oblock; |
795 | e->hit_count = 1; | |
796 | e->generation = mq->generation; | |
797 | push(mq, e); | |
798 | } | |
799 | ||
800 | static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock, | |
801 | struct policy_result *result) | |
802 | { | |
803 | struct entry *e; | |
804 | dm_cblock_t cblock; | |
805 | ||
806 | if (find_free_cblock(mq, &cblock) == -ENOSPC) { | |
807 | result->op = POLICY_MISS; | |
808 | insert_in_pre_cache(mq, oblock); | |
809 | return; | |
810 | } | |
811 | ||
812 | e = alloc_entry(mq); | |
813 | if (unlikely(!e)) { | |
814 | result->op = POLICY_MISS; | |
815 | return; | |
816 | } | |
817 | ||
818 | e->oblock = oblock; | |
819 | e->cblock = cblock; | |
820 | e->in_cache = true; | |
01911c19 | 821 | e->dirty = false; |
f2836352 JT |
822 | e->hit_count = 1; |
823 | e->generation = mq->generation; | |
824 | push(mq, e); | |
825 | ||
826 | result->op = POLICY_NEW; | |
827 | result->cblock = e->cblock; | |
828 | } | |
829 | ||
830 | static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock, | |
831 | bool can_migrate, bool discarded_oblock, | |
832 | int data_dir, struct policy_result *result) | |
833 | { | |
834 | if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) { | |
835 | if (can_migrate) | |
836 | insert_in_cache(mq, oblock, result); | |
837 | else | |
838 | return -EWOULDBLOCK; | |
839 | } else { | |
840 | insert_in_pre_cache(mq, oblock); | |
841 | result->op = POLICY_MISS; | |
842 | } | |
843 | ||
844 | return 0; | |
845 | } | |
846 | ||
847 | /* | |
848 | * Looks the oblock up in the hash table, then decides whether to put it in | |
849 | * the pre_cache, the cache, etc. | |
850 | */ | |
851 | static int map(struct mq_policy *mq, dm_oblock_t oblock, | |
852 | bool can_migrate, bool discarded_oblock, | |
853 | int data_dir, struct policy_result *result) | |
854 | { | |
855 | int r = 0; | |
856 | struct entry *e = hash_lookup(mq, oblock); | |
857 | ||
858 | if (e && e->in_cache) | |
859 | r = cache_entry_found(mq, e, result); | |
860 | else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL) | |
861 | result->op = POLICY_MISS; | |
862 | else if (e) | |
863 | r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock, | |
864 | data_dir, result); | |
865 | else | |
866 | r = no_entry_found(mq, oblock, can_migrate, discarded_oblock, | |
867 | data_dir, result); | |
868 | ||
869 | if (r == -EWOULDBLOCK) | |
870 | result->op = POLICY_MISS; | |
871 | ||
872 | return r; | |
873 | } | |
874 | ||
875 | /*----------------------------------------------------------------*/ | |
876 | ||
877 | /* | |
878 | * Public interface, via the policy struct. See dm-cache-policy.h for a | |
879 | * description of these. | |
880 | */ | |
881 | ||
882 | static struct mq_policy *to_mq_policy(struct dm_cache_policy *p) | |
883 | { | |
884 | return container_of(p, struct mq_policy, policy); | |
885 | } | |
886 | ||
887 | static void mq_destroy(struct dm_cache_policy *p) | |
888 | { | |
889 | struct mq_policy *mq = to_mq_policy(p); | |
890 | ||
891 | free_bitset(mq->allocation_bitset); | |
892 | kfree(mq->table); | |
893 | free_entries(mq); | |
894 | kfree(mq); | |
895 | } | |
896 | ||
897 | static void copy_tick(struct mq_policy *mq) | |
898 | { | |
899 | unsigned long flags; | |
900 | ||
901 | spin_lock_irqsave(&mq->tick_lock, flags); | |
902 | mq->tick = mq->tick_protected; | |
903 | spin_unlock_irqrestore(&mq->tick_lock, flags); | |
904 | } | |
905 | ||
906 | static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock, | |
907 | bool can_block, bool can_migrate, bool discarded_oblock, | |
908 | struct bio *bio, struct policy_result *result) | |
909 | { | |
910 | int r; | |
911 | struct mq_policy *mq = to_mq_policy(p); | |
912 | ||
913 | result->op = POLICY_MISS; | |
914 | ||
915 | if (can_block) | |
916 | mutex_lock(&mq->lock); | |
917 | else if (!mutex_trylock(&mq->lock)) | |
918 | return -EWOULDBLOCK; | |
919 | ||
920 | copy_tick(mq); | |
921 | ||
922 | iot_examine_bio(&mq->tracker, bio); | |
923 | r = map(mq, oblock, can_migrate, discarded_oblock, | |
924 | bio_data_dir(bio), result); | |
925 | ||
926 | mutex_unlock(&mq->lock); | |
927 | ||
928 | return r; | |
929 | } | |
930 | ||
931 | static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) | |
932 | { | |
933 | int r; | |
934 | struct mq_policy *mq = to_mq_policy(p); | |
935 | struct entry *e; | |
936 | ||
937 | if (!mutex_trylock(&mq->lock)) | |
938 | return -EWOULDBLOCK; | |
939 | ||
940 | e = hash_lookup(mq, oblock); | |
941 | if (e && e->in_cache) { | |
942 | *cblock = e->cblock; | |
943 | r = 0; | |
944 | } else | |
945 | r = -ENOENT; | |
946 | ||
947 | mutex_unlock(&mq->lock); | |
948 | ||
949 | return r; | |
950 | } | |
951 | ||
01911c19 JT |
952 | /* |
953 | * FIXME: __mq_set_clear_dirty can block due to mutex. | |
954 | * Ideally a policy should not block in functions called | |
955 | * from the map() function. Explore using RCU. | |
956 | */ | |
957 | static void __mq_set_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock, bool set) | |
958 | { | |
959 | struct mq_policy *mq = to_mq_policy(p); | |
960 | struct entry *e; | |
961 | ||
962 | mutex_lock(&mq->lock); | |
963 | e = hash_lookup(mq, oblock); | |
964 | if (!e) | |
965 | DMWARN("__mq_set_clear_dirty called for a block that isn't in the cache"); | |
966 | else { | |
967 | BUG_ON(!e->in_cache); | |
968 | ||
969 | del(mq, e); | |
970 | e->dirty = set; | |
971 | push(mq, e); | |
972 | } | |
973 | mutex_unlock(&mq->lock); | |
974 | } | |
975 | ||
976 | static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) | |
977 | { | |
978 | __mq_set_clear_dirty(p, oblock, true); | |
979 | } | |
980 | ||
981 | static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) | |
982 | { | |
983 | __mq_set_clear_dirty(p, oblock, false); | |
984 | } | |
985 | ||
f2836352 JT |
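| /* | |
|  * Adds a pre-existing mapping to the cache queues, restoring its hit | |
|  * count from the saved hint when one is valid. | |
|  */ | |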
986 | static int mq_load_mapping(struct dm_cache_policy *p, |
987 | dm_oblock_t oblock, dm_cblock_t cblock, | |
988 | uint32_t hint, bool hint_valid) | |
989 | { | |
990 | struct mq_policy *mq = to_mq_policy(p); | |
991 | struct entry *e; | |
992 | ||
993 | e = alloc_entry(mq); | |
994 | if (!e) | |
995 | return -ENOMEM; | |
996 | ||
997 | e->cblock = cblock; | |
998 | e->oblock = oblock; | |
999 | e->in_cache = true; | |
01911c19 | 1000 | e->dirty = false; /* this gets corrected in a minute */ |
f2836352 JT |
1001 | e->hit_count = hint_valid ? hint : 1; |
1002 | e->generation = mq->generation; | |
1003 | push(mq, e); | |
1004 | ||
1005 | return 0; | |
1006 | } | |
1007 | ||
1008 | static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn, | |
1009 | void *context) | |
1010 | { | |
1011 | struct mq_policy *mq = to_mq_policy(p); | |
1012 | int r = 0; | |
1013 | struct entry *e; | |
1014 | unsigned level; | |
1015 | ||
1016 | mutex_lock(&mq->lock); | |
1017 | ||
1018 | for (level = 0; level < NR_QUEUE_LEVELS; level++) | |
01911c19 JT |
1019 | list_for_each_entry(e, &mq->cache_clean.qs[level], list) { |
1020 | r = fn(context, e->cblock, e->oblock, e->hit_count); | |
1021 | if (r) | |
1022 | goto out; | |
1023 | } | |
1024 | ||
1025 | for (level = 0; level < NR_QUEUE_LEVELS; level++) | |
1026 | list_for_each_entry(e, &mq->cache_dirty.qs[level], list) { | |
f2836352 JT |
1027 | r = fn(context, e->cblock, e->oblock, e->hit_count); |
1028 | if (r) | |
1029 | goto out; | |
1030 | } | |
1031 | ||
1032 | out: | |
1033 | mutex_unlock(&mq->lock); | |
1034 | ||
1035 | return r; | |
1036 | } | |
1037 | ||
b936bf8b | 1038 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) |
f2836352 | 1039 | { |
b936bf8b GU |
1040 | struct mq_policy *mq = to_mq_policy(p); |
1041 | struct entry *e; | |
1042 | ||
1043 | mutex_lock(&mq->lock); | |
1044 | ||
1045 | e = hash_lookup(mq, oblock); | |
f2836352 JT |
1046 | |
1047 | BUG_ON(!e || !e->in_cache); | |
1048 | ||
1049 | del(mq, e); | |
1050 | e->in_cache = false; | |
01911c19 | 1051 | e->dirty = false; |
f2836352 | 1052 | push(mq, e); |
f2836352 | 1053 | |
f2836352 JT |
1054 | mutex_unlock(&mq->lock); |
1055 | } | |
1056 | ||
01911c19 JT |
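| /* | |
|  * Hands back the least recently used dirty block for writeback and | |
|  * requeues it as clean. | |
|  */ | |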
1057 | static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock, |
1058 | dm_cblock_t *cblock) | |
1059 | { | |
1060 | struct entry *e = pop(mq, &mq->cache_dirty); | |
1061 | ||
1062 | if (!e) | |
1063 | return -ENODATA; | |
1064 | ||
1065 | *oblock = e->oblock; | |
1066 | *cblock = e->cblock; | |
1067 | e->dirty = false; | |
1068 | push(mq, e); | |
1069 | ||
1070 | return 0; | |
1071 | } | |
1072 | ||
1073 | static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, | |
1074 | dm_cblock_t *cblock) | |
1075 | { | |
1076 | int r; | |
1077 | struct mq_policy *mq = to_mq_policy(p); | |
1078 | ||
1079 | mutex_lock(&mq->lock); | |
1080 | r = __mq_writeback_work(mq, oblock, cblock); | |
1081 | mutex_unlock(&mq->lock); | |
1082 | ||
1083 | return r; | |
1084 | } | |
1085 | ||
f2836352 JT |
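| /* | |
|  * Remaps an entry that is already in the cache to a different origin | |
|  * block, and marks it dirty. | |
|  */ | |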
1086 | static void force_mapping(struct mq_policy *mq, |
1087 | dm_oblock_t current_oblock, dm_oblock_t new_oblock) | |
1088 | { | |
1089 | struct entry *e = hash_lookup(mq, current_oblock); | |
1090 | ||
1091 | BUG_ON(!e || !e->in_cache); | |
1092 | ||
1093 | del(mq, e); | |
1094 | e->oblock = new_oblock; | |
01911c19 | 1095 | e->dirty = true; |
f2836352 JT |
1096 | push(mq, e); |
1097 | } | |
1098 | ||
1099 | static void mq_force_mapping(struct dm_cache_policy *p, | |
1100 | dm_oblock_t current_oblock, dm_oblock_t new_oblock) | |
1101 | { | |
1102 | struct mq_policy *mq = to_mq_policy(p); | |
1103 | ||
1104 | mutex_lock(&mq->lock); | |
1105 | force_mapping(mq, current_oblock, new_oblock); | |
1106 | mutex_unlock(&mq->lock); | |
1107 | } | |
1108 | ||
1109 | static dm_cblock_t mq_residency(struct dm_cache_policy *p) | |
1110 | { | |
99ba2ae4 | 1111 | dm_cblock_t r; |
f2836352 JT |
1112 | struct mq_policy *mq = to_mq_policy(p); |
1113 | ||
99ba2ae4 JT |
1114 | mutex_lock(&mq->lock); |
1115 | r = to_cblock(mq->nr_cblocks_allocated); | |
1116 | mutex_unlock(&mq->lock); | |
1117 | ||
1118 | return r; | |
f2836352 JT |
1119 | } |
1120 | ||
1121 | static void mq_tick(struct dm_cache_policy *p) | |
1122 | { | |
1123 | struct mq_policy *mq = to_mq_policy(p); | |
1124 | unsigned long flags; | |
1125 | ||
1126 | spin_lock_irqsave(&mq->tick_lock, flags); | |
1127 | mq->tick_protected++; | |
1128 | spin_unlock_irqrestore(&mq->tick_lock, flags); | |
1129 | } | |
1130 | ||
1131 | static int mq_set_config_value(struct dm_cache_policy *p, | |
1132 | const char *key, const char *value) | |
1133 | { | |
1134 | struct mq_policy *mq = to_mq_policy(p); | |
1135 | enum io_pattern pattern; | |
1136 | unsigned long tmp; | |
1137 | ||
1138 | if (!strcasecmp(key, "random_threshold")) | |
1139 | pattern = PATTERN_RANDOM; | |
1140 | else if (!strcasecmp(key, "sequential_threshold")) | |
1141 | pattern = PATTERN_SEQUENTIAL; | |
1142 | else | |
1143 | return -EINVAL; | |
1144 | ||
1145 | if (kstrtoul(value, 10, &tmp)) | |
1146 | return -EINVAL; | |
1147 | ||
1148 | mq->tracker.thresholds[pattern] = tmp; | |
1149 | ||
1150 | return 0; | |
1151 | } | |
1152 | ||
1153 | static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen) | |
1154 | { | |
1155 | ssize_t sz = 0; | |
1156 | struct mq_policy *mq = to_mq_policy(p); | |
1157 | ||
1158 | DMEMIT("4 random_threshold %u sequential_threshold %u", | |
1159 | mq->tracker.thresholds[PATTERN_RANDOM], | |
1160 | mq->tracker.thresholds[PATTERN_SEQUENTIAL]); | |
1161 | ||
1162 | return 0; | |
1163 | } | |
1164 | ||
1165 | /* Init the policy plugin interface function pointers. */ | |
1166 | static void init_policy_functions(struct mq_policy *mq) | |
1167 | { | |
1168 | mq->policy.destroy = mq_destroy; | |
1169 | mq->policy.map = mq_map; | |
1170 | mq->policy.lookup = mq_lookup; | |
01911c19 JT |
1171 | mq->policy.set_dirty = mq_set_dirty; |
1172 | mq->policy.clear_dirty = mq_clear_dirty; | |
f2836352 JT |
1173 | mq->policy.load_mapping = mq_load_mapping; |
1174 | mq->policy.walk_mappings = mq_walk_mappings; | |
1175 | mq->policy.remove_mapping = mq_remove_mapping; | |
01911c19 | 1176 | mq->policy.writeback_work = mq_writeback_work; |
f2836352 JT |
1177 | mq->policy.force_mapping = mq_force_mapping; |
1178 | mq->policy.residency = mq_residency; | |
1179 | mq->policy.tick = mq_tick; | |
1180 | mq->policy.emit_config_values = mq_emit_config_values; | |
1181 | mq->policy.set_config_value = mq_set_config_value; | |
1182 | } | |
1183 | ||
1184 | static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, | |
1185 | sector_t origin_size, | |
1186 | sector_t cache_block_size) | |
1187 | { | |
1188 | int r; | |
1189 | struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); | |
1190 | ||
1191 | if (!mq) | |
1192 | return NULL; | |
1193 | ||
1194 | init_policy_functions(mq); | |
1195 | iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT); | |
1196 | ||
1197 | mq->cache_size = cache_size; | |
1198 | mq->tick_protected = 0; | |
1199 | mq->tick = 0; | |
1200 | mq->hit_count = 0; | |
1201 | mq->generation = 0; | |
1202 | mq->promote_threshold = 0; | |
1203 | mutex_init(&mq->lock); | |
1204 | spin_lock_init(&mq->tick_lock); | |
1205 | mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG); | |
1206 | mq->find_free_last_word = 0; | |
1207 | ||
1208 | queue_init(&mq->pre_cache); | |
01911c19 JT |
1209 | queue_init(&mq->cache_clean); |
1210 | queue_init(&mq->cache_dirty); | |
1211 | ||
f2836352 JT |
1212 | mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U); |
1213 | ||
1214 | mq->nr_entries = 2 * from_cblock(cache_size); | |
1215 | r = alloc_entries(mq, mq->nr_entries); | |
1216 | if (r) | |
1217 | goto bad_cache_alloc; | |
1218 | ||
1219 | mq->nr_entries_allocated = 0; | |
1220 | mq->nr_cblocks_allocated = 0; | |
1221 | ||
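| /* | |
|  * Size the hash table at roughly one bucket per two cache blocks | |
|  * (minimum 16), rounded up to a power of two so hash_bits is simply | |
|  * its log2. | |
|  */ | |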
1222 | mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); | |
1223 | mq->hash_bits = ffs(mq->nr_buckets) - 1; | |
1224 | mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); | |
1225 | if (!mq->table) | |
1226 | goto bad_alloc_table; | |
1227 | ||
1228 | mq->allocation_bitset = alloc_bitset(from_cblock(cache_size)); | |
1229 | if (!mq->allocation_bitset) | |
1230 | goto bad_alloc_bitset; | |
1231 | ||
1232 | return &mq->policy; | |
1233 | ||
1234 | bad_alloc_bitset: | |
1235 | kfree(mq->table); | |
1236 | bad_alloc_table: | |
1237 | free_entries(mq); | |
1238 | bad_cache_alloc: | |
1239 | kfree(mq); | |
1240 | ||
1241 | return NULL; | |
1242 | } | |
1243 | ||
1244 | /*----------------------------------------------------------------*/ | |
1245 | ||
1246 | static struct dm_cache_policy_type mq_policy_type = { | |
1247 | .name = "mq", | |
4e7f506f | 1248 | .version = {1, 0, 0}, |
f2836352 JT |
1249 | .hint_size = 4, |
1250 | .owner = THIS_MODULE, | |
1251 | .create = mq_create | |
1252 | }; | |
1253 | ||
1254 | static struct dm_cache_policy_type default_policy_type = { | |
1255 | .name = "default", | |
4e7f506f | 1256 | .version = {1, 0, 0}, |
f2836352 JT |
1257 | .hint_size = 4, |
1258 | .owner = THIS_MODULE, | |
1259 | .create = mq_create | |
1260 | }; | |
1261 | ||
1262 | static int __init mq_init(void) | |
1263 | { | |
1264 | int r; | |
1265 | ||
1266 | mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry", | |
1267 | sizeof(struct entry), | |
1268 | __alignof__(struct entry), | |
1269 | 0, NULL); | |
1270 | if (!mq_entry_cache) | |
1271 | goto bad; | |
1272 | ||
1273 | r = dm_cache_policy_register(&mq_policy_type); | |
1274 | if (r) { | |
1275 | DMERR("register failed %d", r); | |
1276 | goto bad_register_mq; | |
1277 | } | |
1278 | ||
1279 | r = dm_cache_policy_register(&default_policy_type); | |
1280 | if (!r) { | |
4e7f506f MS |
1281 | DMINFO("version %u.%u.%u loaded", |
1282 | mq_policy_type.version[0], | |
1283 | mq_policy_type.version[1], | |
1284 | mq_policy_type.version[2]); | |
f2836352 JT |
1285 | return 0; |
1286 | } | |
1287 | ||
1288 | DMERR("register failed (as default) %d", r); | |
1289 | ||
1290 | dm_cache_policy_unregister(&mq_policy_type); | |
1291 | bad_register_mq: | |
1292 | kmem_cache_destroy(mq_entry_cache); | |
1293 | bad: | |
1294 | return -ENOMEM; | |
1295 | } | |
1296 | ||
1297 | static void __exit mq_exit(void) | |
1298 | { | |
1299 | dm_cache_policy_unregister(&mq_policy_type); | |
1300 | dm_cache_policy_unregister(&default_policy_type); | |
1301 | ||
1302 | kmem_cache_destroy(mq_entry_cache); | |
1303 | } | |
1304 | ||
1305 | module_init(mq_init); | |
1306 | module_exit(mq_exit); | |
1307 | ||
1308 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | |
1309 | MODULE_LICENSE("GPL"); | |
1310 | MODULE_DESCRIPTION("mq cache policy"); | |
1311 | ||
1312 | MODULE_ALIAS("dm-cache-default"); |