/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}
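/*
 * Example: next_power(9, 16) == 16 and next_power(300, 16) == 512.
 * mq_create() uses this to round the hash table size up to a power of
 * two so that hash_bits can be derived with ffs().
 */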
/*----------------------------------------------------------------*/

static unsigned long *alloc_bitset(unsigned nr_entries)
{
	size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);

	return vzalloc(s);
}

static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds that switch between random and sequential io mode
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};
static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}
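/*
 * Worked example with the defaults above: 512 contiguous bios (each one
 * starting at the sector after the previous one ended) are needed to
 * switch the tracker into PATTERN_SEQUENTIAL, since a single
 * non-contiguous bio resets nr_seq_samples.  Once sequential, just 4
 * random bios switch it back to PATTERN_RANDOM.
 */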
/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
	struct list_head qs[NR_QUEUE_LEVELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct list_head *elt)
{
	list_del(elt);
}

/*
 * Shifts all regions down one level.  This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
	unsigned level;

	for (level = 1; level < NR_QUEUE_LEVELS; level++)
		list_splice_init(q->qs + level, q->qs + level - 1);
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
	unsigned level;
	struct list_head *r;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level)) {
			r = q->qs[level].next;
			list_del(r);

			/* have we just emptied the bottom level? */
			if (level == 0 && list_empty(q->qs))
				queue_shift_down(q);

			return r;
		}

	return NULL;
}
static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;
	dm_cblock_t cblock;	/* valid iff in_cache */

	/*
	 * FIXME: pack these better
	 */
	bool in_cache:1;
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, contains the currently
	 * active mappings.  Whereas the pre_cache tracks blocks that
	 * are being hit frequently and potential candidates for promotion
	 * to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * book keeping effects.  eg, decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	/*
	 * Entries in the pre_cache whose hit count passes the promotion
	 * threshold move to the cache proper.  Working out the correct
	 * value for the promotion_threshold is crucial to this policy.
	 */
	unsigned promote_threshold;

	/*
	 * We need cache_size entries for the cache, and choose to have
	 * cache_size entries for the pre_cache too.  One motivation for
	 * using the same size is to make the hit counts directly
	 * comparable between pre_cache and cache.
	 */
	unsigned nr_entries;
	unsigned nr_entries_allocated;
	struct list_head free;

	/*
	 * Cache blocks may be unallocated.  We store this info in a
	 * bitset.
	 */
	unsigned long *allocation_bitset;
	unsigned nr_cblocks_allocated;
	unsigned find_free_nr_words;
	unsigned find_free_last_word;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};
/*----------------------------------------------------------------*/

/* Free/alloc mq cache entry structures. */
static void concat_queue(struct list_head *lh, struct queue *q)
{
	unsigned level;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_splice(q->qs + level, lh);
}

static void free_entries(struct mq_policy *mq)
{
	struct entry *e, *tmp;

	concat_queue(&mq->free, &mq->pre_cache);
	concat_queue(&mq->free, &mq->cache_clean);
	concat_queue(&mq->free, &mq->cache_dirty);

	list_for_each_entry_safe(e, tmp, &mq->free, list)
		kmem_cache_free(mq_entry_cache, e);
}

static int alloc_entries(struct mq_policy *mq, unsigned elts)
{
	unsigned u = mq->nr_entries;

	INIT_LIST_HEAD(&mq->free);
	mq->nr_entries_allocated = 0;

	while (u--) {
		struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);

		if (!e) {
			free_entries(mq);
			return -ENOMEM;
		}

		list_add(&e->list, &mq->free);
	}

	return 0;
}
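/*
 * Note that the pool above is filled once at construction time (mq_create()
 * asks for 2 * cache_size entries, covering the cache and the pre_cache),
 * so alloc_entry() never needs to allocate memory on the io path.
 */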
/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}
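/*
 * Note that a successful lookup re-links the entry at the head of its
 * bucket, so frequently accessed blocks are found after fewer list hops.
 */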
static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

/*
 * Allocates a new entry structure.  The memory is allocated in one lump,
 * so we just hand it out here.  Returns NULL if all entries have
 * already been allocated.  Cannot fail otherwise.
 */
static struct entry *alloc_entry(struct mq_policy *mq)
{
	struct entry *e;

	if (mq->nr_entries_allocated >= mq->nr_entries) {
		BUG_ON(!list_empty(&mq->free));
		return NULL;
	}

	e = list_entry(list_pop(&mq->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);

	mq->nr_entries_allocated++;

	return e;
}

/*----------------------------------------------------------------*/

/*
 * Mark cache blocks allocated or not in the bitset.
 */
static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
	BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));

	set_bit(from_cblock(cblock), mq->allocation_bitset);
	mq->nr_cblocks_allocated++;
}

static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
	BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));

	clear_bit(from_cblock(cblock), mq->allocation_bitset);
	mq->nr_cblocks_allocated--;
}

static bool any_free_cblocks(struct mq_policy *mq)
{
	return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
}
/*
 * Fills result out with a cache block that isn't in use, or return
 * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is
 * responsible for that.
 */
static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
			      dm_cblock_t *result, unsigned *last_word)
{
	int r = -ENOSPC;
	unsigned w;

	for (w = begin; w < end; w++) {
		/*
		 * ffz is undefined if no zero exists
		 */
		if (mq->allocation_bitset[w] != ~0UL) {
			*last_word = w;
			*result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
			if (from_cblock(*result) < from_cblock(mq->cache_size))
				r = 0;

			break;
		}
	}

	return r;
}

static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
{
	int r;

	if (!any_free_cblocks(mq))
		return -ENOSPC;

	r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
	if (r == -ENOSPC && mq->find_free_last_word)
		r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);

	return r;
}
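/*
 * This is a simple next-fit allocator: the scan resumes from the word
 * that satisfied the previous request (find_free_last_word) and wraps
 * back to the start of the bitset if nothing is found above it.
 */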
/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
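/*
 * So a hit count of 1 maps to level 0, 2-3 to level 1, 4-7 to level 2,
 * and anything from 32768 upwards is clamped to the top level (15).
 */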
/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necc.  Inserts into the hash table.  Sets the
 * tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (e->in_cache) {
		alloc_cblock(mq, e->cblock);
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	} else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 * Frees off the cache block if necc.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	queue_remove(&e->list);
	hash_remove(e);
	if (e->in_cache)
		free_cblock(mq, e->cblock);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	if (e->in_cache)
		free_cblock(mq, e->cblock);

	return e;
}

/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}

/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20
static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) &&
	    (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {

		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}

		mq->promote_threshold = nr ? total / nr : 1;
		if (mq->promote_threshold * nr < total)
			mq->promote_threshold++;
	}
}
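/*
 * In other words, promote_threshold ends up as the mean hit count of the
 * sampled cache entries, rounded up, or 1 if nothing could be sampled.
 */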
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t *cblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*cblock = demoted->cblock;
	*oblock = demoted->oblock;
	demoted->in_cache = false;
	demoted->dirty = false;
	demoted->hit_count = 1;
	push(mq, demoted);

	return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8

static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.  In practice this only triggers
		 * during initial population after a format.
		 */
		return DISCARDED_PROMOTE_THRESHOLD;

	return data_dir == READ ?
		(mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
		(mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
}
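/*
 * Example: with promote_threshold at 4, a read must accumulate a hit
 * count of 8 in the pre_cache before it is promoted, a write needs 12,
 * and a write to a discarded block promotes immediately while free cache
 * blocks remain.
 */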
static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (e->in_cache) {
		result->op = POLICY_HIT;
		result->cblock = e->cblock;
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	dm_cblock_t cblock;

	if (find_free_cblock(mq, &cblock) == -ENOSPC) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock, &cblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}

	} else
		result->op = POLICY_NEW;

	result->cblock = e->cblock = cblock;

	del(mq, e);
	e->in_cache = true;
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	requeue_and_update_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir))
		result->op = POLICY_MISS;
	else if (!can_migrate)
		r = -EWOULDBLOCK;
	else
		r = pre_cache_to_cache(mq, e, result);

	return r;
}
static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(mq);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->in_cache = false;
	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	struct entry *e;
	dm_cblock_t cblock;

	if (find_free_cblock(mq, &cblock) == -ENOSPC) {
		result->op = POLICY_MISS;
		insert_in_pre_cache(mq, oblock);
		return;
	}

	e = alloc_entry(mq);
	if (unlikely(!e)) {
		result->op = POLICY_MISS;
		return;
	}

	e->oblock = oblock;
	e->cblock = cblock;
	e->in_cache = true;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->op = POLICY_NEW;
	result->cblock = e->cblock;
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put in
 * pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && e->in_cache)
		r = cache_entry_found(mq, e, result);
	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;
	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);
	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}
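/*
 * Summary of the decision order above: a block already in the cache is
 * always a hit; sequential io bypasses the cache; a block already in the
 * pre_cache is promoted once its hit count passes the adjusted threshold;
 * anything else goes into the pre_cache (or straight into the cache when
 * promotion is essentially free, e.g. a write to a discarded block).
 */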
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	free_bitset(mq->allocation_bitset);
	kfree(mq->table);
	free_entries(mq);
	kfree(mq);
}

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && e->in_cache) {
		*cblock = e->cblock;
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}
/*
 * FIXME: __mq_set_clear_dirty can block due to mutex.
 * Ideally a policy should not block in functions called
 * from the map() function.  Explore using RCU.
 */
static void __mq_set_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock, bool set)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	mutex_lock(&mq->lock);
	e = hash_lookup(mq, oblock);
	if (!e)
		DMWARN("__mq_set_clear_dirty called for a block that isn't in the cache");
	else {
		BUG_ON(!e->in_cache);

		del(mq, e);
		e->dirty = set;
		push(mq, e);
	}
	mutex_unlock(&mq->lock);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	__mq_set_clear_dirty(p, oblock, true);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	__mq_set_clear_dirty(p, oblock, false);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_entry(mq);
	if (!e)
		return -ENOMEM;

	e->cblock = cblock;
	e->oblock = oblock;
	e->in_cache = true;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}
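/*
 * The hint loaded above is the hit count that mq_walk_mappings() below
 * handed to the metadata layer when the mappings were last saved, so hot
 * blocks keep their queue level across a suspend/resume cycle.
 */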
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;
	struct entry *e;
	unsigned level;

	mutex_lock(&mq->lock);

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, &mq->cache_clean.qs[level], list) {
			r = fn(context, e->cblock, e->oblock, e->hit_count);
			if (r)
				goto out;
		}

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, &mq->cache_dirty.qs[level], list) {
			r = fn(context, e->cblock, e->oblock, e->hit_count);
			if (r)
				goto out;
		}

out:
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	mutex_lock(&mq->lock);

	e = hash_lookup(mq, oblock);

	BUG_ON(!e || !e->in_cache);

	del(mq, e);
	e->in_cache = false;
	e->dirty = false;
	push(mq, e);

	mutex_unlock(&mq->lock);
}
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = e->cblock;
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void force_mapping(struct mq_policy *mq,
			  dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	BUG_ON(!e || !e->in_cache);

	del(mq, e);
	e->oblock = new_oblock;
	e->dirty = true;
	push(mq, e);
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->nr_cblocks_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	enum io_pattern pattern;
	unsigned long tmp;

	if (!strcasecmp(key, "random_threshold"))
		pattern = PATTERN_RANDOM;
	else if (!strcasecmp(key, "sequential_threshold"))
		pattern = PATTERN_SEQUENTIAL;
	else
		return -EINVAL;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	mq->tracker.thresholds[pattern] = tmp;

	return 0;
}
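/*
 * For example, the thresholds of a live cache device can typically be
 * tuned with something like:
 *
 *   dmsetup message <cache-device> 0 sequential_threshold 1024
 *
 * which the dm-cache core routes through to this function.
 */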
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("4 random_threshold %u sequential_threshold %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	int r;
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);

	mq->cache_size = cache_size;
	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);
	mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
	mq->find_free_last_word = 0;

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	mq->nr_entries = 2 * from_cblock(cache_size);
	r = alloc_entries(mq, mq->nr_entries);
	if (r)
		goto bad_cache_alloc;

	mq->nr_entries_allocated = 0;
	mq->nr_cblocks_allocated = 0;

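	/*
	 * Size the hash table at roughly half the number of cache blocks,
	 * rounded up to a power of two (minimum 16 buckets).  e.g. a cache
	 * of 1024 blocks gets 512 buckets and a hash_bits of 9.
	 */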
	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
	if (!mq->allocation_bitset)
		goto bad_alloc_bitset;

	return &mq->policy;

bad_alloc_bitset:
	kfree(mq->table);
bad_alloc_table:
	free_entries(mq);
bad_cache_alloc:
	kfree(mq);

	return NULL;
}
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");