/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"
static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/
/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds for switching between random and sequential io mode
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};
static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}
static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}
static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}
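/*
 * Worked example (illustrative, not part of the original source): with the
 * default thresholds above, 512 consecutive sequential bios (each starting
 * at the sector just past where the previous one ended) switch the tracker
 * to PATTERN_SEQUENTIAL, after which map() below reports misses so the io
 * stays on the origin device; 4 random bios switch it back to
 * PATTERN_RANDOM.
 */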
/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
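/*
 * Illustrative note (not part of the original source): queue_push() below
 * appends to the back of a level and queue_pop() takes from the front of
 * the lowest non-empty level, so entries on low levels (ie. low hit
 * counts, see queue_level()) are always reclaimed before entries on
 * higher levels.
 */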
struct queue {
	struct list_head qs[NR_QUEUE_LEVELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}
/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		if (!list_empty(q->qs + i))
			return false;

	return true;
}
/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	list_add_tail(elt, q->qs + level);
}
static void queue_remove(struct list_head *elt)
{
	list_del(elt);
}
/*
 * Shifts all regions down one level.  This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
	unsigned level;

	for (level = 1; level < NR_QUEUE_LEVELS; level++)
		list_splice_init(q->qs + level, q->qs + level - 1);
}
/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
	unsigned level;
	struct list_head *r;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level)) {
			r = q->qs[level].next;
			list_del(r);

			/* have we just emptied the bottom level? */
			if (level == 0 && list_empty(q->qs))
				queue_shift_down(q);

			return r;
		}

	return NULL;
}
static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/
/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};
/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};
static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}
static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}
static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del(&e->list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}
static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}
/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	return !hlist_unhashed(&e->hlist) ? e : NULL;
}
static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}
static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}
static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}
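/*
 * Illustrative example (not part of the original source): the entry for
 * cblock N is simply ep->entries + N, so alloc_particular_entry() and
 * infer_cblock() are inverses and no cblock needs to be stored in the
 * entry itself.
 */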
/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, contains the currently
	 * active mappings.  Whereas the pre_cache tracks blocks that
	 * are being hit frequently and are potential candidates for
	 * promotion to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * book keeping effects.  eg, decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	/*
	 * Entries in the pre_cache whose hit count passes the promotion
	 * threshold move to the cache proper.  Working out the correct
	 * value for the promotion_threshold is crucial to this policy.
	 */
	unsigned promote_threshold;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};
/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}
static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}
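/*
 * Illustrative sizing example (not part of the original source): mq_create()
 * below sets nr_buckets = next_power(from_cblock(cache_size) / 2, 16) and
 * hash_bits = ffs(nr_buckets) - 1, so a cache of 16384 blocks gets 8192
 * buckets and a 13 bit hash.
 */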
/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}
static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/
/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
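/*
 * Worked example (illustrative, not part of the original source): hit
 * counts of 1, 2-3, 4-7, 8-15, ... map to levels 0, 1, 2, 3, ...; anything
 * with a hit count of 2^15 or more stays in the top level (15).
 */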
static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}
/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	queue_remove(&e->list);
	hash_remove(e);
}
/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}
/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}
/*
 * The promotion threshold is adjusted every generation, as are the hit
 * counts of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20
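/*
 * Worked example (illustrative, not part of the original source): if the 20
 * sampled entries have hit counts totalling 170, check_generation() below
 * sets promote_threshold to 170 / 20 = 8 and then bumps it to 9 because
 * 8 * 20 < 170, ie. the average is rounded up.
 */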
static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}

		mq->promote_threshold = nr ? total / nr : 1;
		if (mq->promote_threshold * nr < total)
			mq->promote_threshold++;
	}
}
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* FIXME: divide? */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}
/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}
/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8
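/*
 * Worked example (illustrative, not part of the original source): with
 * promote_threshold at 10, a read needs a hit count of at least 14 to be
 * promoted and a write at least 18, but a write to a discarded block only
 * needs 1 provided a free or clean cblock is available (see
 * adjusted_promote_threshold() and should_promote() below).
 */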
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return DISCARDED_PROMOTE_THRESHOLD;
	}

	return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}
static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}
/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}

	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;
	new_e->generation = e->generation;
	new_e->tick = e->tick;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	requeue_and_update_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir))
		result->op = POLICY_MISS;
	else if (!can_migrate)
		r = -EWOULDBLOCK;
	else
		r = pre_cache_to_cache(mq, e, result);

	return r;
}
static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}
/*
 * Looks the oblock up in the hash table, then decides whether to put in
 * pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}
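/*
 * Illustrative walk-through (not part of the original source): the first
 * write to an unknown, undiscarded origin block lands in the pre_cache and
 * the caller sees POLICY_MISS; once its hit count reaches
 * promote_threshold + WRITE_PROMOTE_THRESHOLD it is promoted via
 * pre_cache_to_cache(), yielding POLICY_NEW, or POLICY_REPLACE if a clean
 * block first has to be demoted.
 */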
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}
static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	kfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}
static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}
static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}
static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}
static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}
static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}
static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}
static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, q->qs + level, list) {
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}
static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}
static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}
static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}
static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}
static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}
static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}
static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	enum io_pattern pattern;
	unsigned long tmp;

	if (!strcasecmp(key, "random_threshold"))
		pattern = PATTERN_RANDOM;
	else if (!strcasecmp(key, "sequential_threshold"))
		pattern = PATTERN_SEQUENTIAL;
	else
		return -EINVAL;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	mq->tracker.thresholds[pattern] = tmp;

	return 0;
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("4 random_threshold %u sequential_threshold %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

	return 0;
}
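/*
 * Usage note (illustrative, not part of the original source): these
 * tunables are normally adjusted from userspace via the device-mapper
 * message interface, e.g. something like
 * "dmsetup message <cache-dev> 0 sequential_threshold 1024"; see
 * Documentation/device-mapper/cache.txt for the exact syntax.
 */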
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;
bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}

/*----------------------------------------------------------------*/
static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};
static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 1, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};
static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}
static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}
module_init(mq_init);
module_exit(mq_exit);
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");