/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;
/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}
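/*
 * For example, next_power(9, 16) evaluates to 16 and next_power(33, 16)
 * to 64: the argument is clamped up to the minimum and then rounded up to
 * the next power of two.  This is only used to size the hash table below.
 */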
/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds used to switch between random and sequential io mode
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};
static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}
static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}
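/*
 * With the default thresholds above, the tracker only flips to
 * PATTERN_SEQUENTIAL after 512 consecutive ios that each start where the
 * previous one ended, while just 4 ios that break the sequence are enough
 * to flip it back to PATTERN_RANDOM.  A minimal sketch of the intended
 * use (the map path below does exactly this):
 *
 *	iot_init(&t, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
 *	iot_examine_bio(&t, bio);
 *	if (iot_pattern(&t) == PATTERN_SEQUENTIAL)
 *		... treat the io as a miss and leave it on the origin ...
 */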
/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
	struct list_head qs[NR_QUEUE_LEVELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}

/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		if (!list_empty(q->qs + i))
			return false;

	return true;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct list_head *elt)
{
	list_del(elt);
}
/*
 * Shifts all regions down one level.  This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
	unsigned level;

	for (level = 1; level < NR_QUEUE_LEVELS; level++)
		list_splice_init(q->qs + level, q->qs + level - 1);
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
	unsigned level;
	struct list_head *r;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level)) {
			r = q->qs[level].next;
			list_del(r);

			/* have we just emptied the bottom level? */
			if (level == 0 && list_empty(q->qs))
				queue_shift_down(q);

			return r;
		}

	return NULL;
}
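/*
 * For example, an entry pushed at level 3 sits behind everything queued at
 * levels 0-2: queue_pop() always returns the head of the lowest non-empty
 * level, and once level 0 drains completely, queue_shift_down() moves every
 * level down one so that long-idle entries gradually become candidates for
 * popping.
 */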
static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/
/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}
static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del(&e->list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}
static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}
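/*
 * In other words the cblock is just the entry's index in ep->entries.
 * As a small worked example, for a pool set up with epool_init(ep, 64),
 * alloc_particular_entry(ep, to_cblock(5)) hands back &ep->entries[5] and
 * infer_cblock(ep, &ep->entries[5]) recovers cblock 5 again.
 */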
/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, contains the currently
	 * active mappings, whereas the pre_cache tracks blocks that are
	 * being hit frequently and are potential candidates for promotion
	 * to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * book keeping effects.  eg, decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	/*
	 * Entries in the pre_cache whose hit count passes the promotion
	 * threshold move to the cache proper.  Working out the correct
	 * value for the promotion_threshold is crucial to this policy.
	 */
	unsigned promote_threshold;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

/*----------------------------------------------------------------*/
/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}
static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}
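/*
 * The bucket count and hash width are derived from the cache size in
 * mq_create() below.  As a worked example, a cache of 65536 blocks gives
 * nr_buckets = next_power(65536 / 2, 16) = 32768 and hash_bits =
 * ffs(32768) - 1 = 15, so hash_64() above yields 15 bit bucket indices.
 */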
/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/
/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
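/*
 * So a hit count of 1 maps to level 0, 2-3 to level 1, 4-7 to level 2,
 * and anything of 32768 or more is clamped to the top level, 15.
 */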
static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	queue_remove(&e->list);
	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}
/*
 * The promotion threshold is adjusted every generation, as are the hit
 * counts of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}

		mq->promote_threshold = nr ? total / nr : 1;
		if (mq->promote_threshold * nr < total)
			mq->promote_threshold++;
	}
}
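/*
 * As a worked example, if the first 20 entries visited give nr = 20 and
 * total = 128, the integer division yields 6 and the remainder bumps it to
 * 7, so a pre_cache entry then needs at least 7 hits, plus the read/write
 * bias added below, before it is promoted.
 */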
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}
/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   back into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}
/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8

static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return DISCARDED_PROMOTE_THRESHOLD;
	}

	return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}
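/*
 * For example, with mq->promote_threshold sitting at 8, a block needs 12
 * hits in the pre_cache to be promoted on a read, 16 on a write, and a
 * discarded block needs only a single hit while a free or clean cblock is
 * available.
 */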
static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}
static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}

	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;
	new_e->generation = e->generation;
	new_e->tick = e->tick;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	requeue_and_update_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir))
		result->op = POLICY_MISS;
	else if (!can_migrate)
		r = -EWOULDBLOCK;
	else
		r = pre_cache_to_cache(mq, e, result);

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}
/*
 * Looks the oblock up in the hash table, then decides whether to put in
 * pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/
/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	kfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}
static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}
static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, q->qs + level, list) {
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}
static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}
static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	enum io_pattern pattern;
	unsigned long tmp;

	if (!strcasecmp(key, "random_threshold"))
		pattern = PATTERN_RANDOM;
	else if (!strcasecmp(key, "sequential_threshold"))
		pattern = PATTERN_SEQUENTIAL;
	else
		return -EINVAL;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	mq->tracker.thresholds[pattern] = tmp;

	return 0;
}
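/*
 * These two keys arrive via the cache target's message interface, so
 * something along the lines of (assuming a cache device named "my-cache"):
 *
 *	dmsetup message my-cache 0 sequential_threshold 1024
 *
 * should end up here with key = "sequential_threshold" and value = "1024".
 * The exact plumbing lives in the cache target, not in this file.
 */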
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("4 random_threshold %u sequential_threshold %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

	return 0;
}
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};
static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&mq_policy_type);
bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}
static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");