/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds for switching between random and sequential io mode
 * default as follows, and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
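
/*
 * With the defaults above, roughly 512 contiguous ios are needed before
 * the tracker switches into sequential mode, and 4 random ios before it
 * switches back; a single non-sequential io resets the sequential count.
 */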

enum io_pattern {
        PATTERN_SEQUENTIAL,
        PATTERN_RANDOM
};

struct io_tracker {
        enum io_pattern pattern;

        unsigned nr_seq_samples;
        unsigned nr_rand_samples;
        unsigned thresholds[2];

        dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
                     int sequential_threshold, int random_threshold)
{
        t->pattern = PATTERN_RANDOM;
        t->nr_seq_samples = 0;
        t->nr_rand_samples = 0;
        t->last_end_oblock = 0;
        t->thresholds[PATTERN_RANDOM] = random_threshold;
        t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
        return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
        if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
                 * Just one non-sequential IO is enough to reset the
                 * counters.
                 */
                if (t->nr_seq_samples) {
                        t->nr_seq_samples = 0;
                        t->nr_rand_samples = 0;
                }

                t->nr_rand_samples++;
        }

        t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
        switch (t->pattern) {
        case PATTERN_SEQUENTIAL:
                if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
                        t->pattern = PATTERN_RANDOM;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;

        case PATTERN_RANDOM:
                if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
                        t->pattern = PATTERN_SEQUENTIAL;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;
        }
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
        iot_update_stats(t, bio);
        iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/


/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
#define NR_SENTINELS (NR_QUEUE_LEVELS * 3)

#define WRITEBACK_PERIOD HZ

struct queue {
        unsigned nr_elts;
        bool current_writeback_sentinels;
        unsigned long next_writeback;
        struct list_head qs[NR_QUEUE_LEVELS];
        struct list_head sentinels[NR_SENTINELS];
};

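/*
 * The sentinels array above is split into three groups of
 * NR_QUEUE_LEVELS entries: the first group holds the per level tick
 * sentinels, and the remaining two groups are the alternating sets of
 * writeback sentinels selected by current_writeback_sentinels.
 */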
static void queue_init(struct queue *q)
{
        unsigned i;

        q->nr_elts = 0;
        q->current_writeback_sentinels = false;
        q->next_writeback = 0;
        for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                INIT_LIST_HEAD(q->qs + i);
                INIT_LIST_HEAD(q->sentinels + i);
                INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
                INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
        }
}

static unsigned queue_size(struct queue *q)
{
        return q->nr_elts;
}

static bool queue_empty(struct queue *q)
{
        return q->nr_elts == 0;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
        q->nr_elts++;
        list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct queue *q, struct list_head *elt)
{
        q->nr_elts--;
        list_del(elt);
}

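/*
 * Sentinels are recognised purely by address: they live in the embedded
 * q->sentinels[] array rather than inside an entry.
 */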
static bool is_sentinel(struct queue *q, struct list_head *h)
{
        return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_peek(struct queue *q)
{
        unsigned level;
        struct list_head *h;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each(h, q->qs + level)
                        if (!is_sentinel(q, h))
                                return h;

        return NULL;
}

static struct list_head *queue_pop(struct queue *q)
{
        struct list_head *r = queue_peek(q);

        if (r) {
                q->nr_elts--;
                list_del(r);
        }

        return r;
}

/*
 * Pops an entry from a level that is not past a sentinel.
 */
static struct list_head *queue_pop_old(struct queue *q)
{
        unsigned level;
        struct list_head *h;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each(h, q->qs + level) {
                        if (is_sentinel(q, h))
                                break;

                        q->nr_elts--;
                        list_del(h);
                        return h;
                }

        return NULL;
}

static struct list_head *list_pop(struct list_head *lh)
{
        struct list_head *r = lh->next;

        BUG_ON(!r);
        list_del_init(r);

        return r;
}

static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
{
        if (q->current_writeback_sentinels)
                return q->sentinels + NR_QUEUE_LEVELS + level;
        else
                return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
}

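/*
 * Every WRITEBACK_PERIOD one set of writeback sentinels is requeued at
 * the back of each level and current_writeback_sentinels is flipped.
 * Entries found ahead of a writeback sentinel have therefore been in the
 * queue for at least one period, making them candidates for writeback.
 */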
static void queue_update_writeback_sentinels(struct queue *q)
{
        unsigned i;
        struct list_head *h;

        if (time_after(jiffies, q->next_writeback)) {
                for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                        h = writeback_sentinel(q, i);
                        list_del(h);
                        list_add_tail(h, q->qs + i);
                }

                q->next_writeback = jiffies + WRITEBACK_PERIOD;
                q->current_writeback_sentinels = !q->current_writeback_sentinels;
        }
}

/*
 * Sometimes we want to iterate through entries that have been pushed since
 * a certain event.  We use sentinel entries on the queues to delimit these
 * 'tick' events.
 */
static void queue_tick(struct queue *q)
{
        unsigned i;

        for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                list_del(q->sentinels + i);
                list_add_tail(q->sentinels + i, q->qs + i);
        }
}

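/*
 * Calls fn on every entry pushed since the last tick, ie. everything
 * sitting behind the tick sentinel at each level, visiting the newest
 * entries first.
 */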
typedef void (*iter_fn)(struct list_head *, void *);
static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
{
        unsigned i;
        struct list_head *h;

        for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                list_for_each_prev(h, q->qs + i) {
                        if (is_sentinel(q, h))
                                break;

                        fn(h, context);
                }
        }
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
        struct hlist_node hlist;
        struct list_head list;
        dm_oblock_t oblock;

        /*
         * FIXME: pack these better
         */
        bool dirty:1;
        unsigned hit_count;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
        struct entry *entries, *entries_end;
        struct list_head free;
        unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
        unsigned i;

        ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
        if (!ep->entries)
                return -ENOMEM;

        ep->entries_end = ep->entries + nr_entries;

        INIT_LIST_HEAD(&ep->free);
        for (i = 0; i < nr_entries; i++)
                list_add(&ep->entries[i].list, &ep->free);

        ep->nr_allocated = 0;

        return 0;
}

static void epool_exit(struct entry_pool *ep)
{
        vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
        struct entry *e;

        if (list_empty(&ep->free))
                return NULL;

        e = list_entry(list_pop(&ep->free), struct entry, list);
        INIT_LIST_HEAD(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        list_del_init(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
        BUG_ON(!ep->nr_allocated);
        ep->nr_allocated--;
        INIT_HLIST_NODE(&e->hlist);
        list_add(&e->list, &ep->free);
}

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);
        return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
        return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
        return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
        return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/

struct mq_policy {
        struct dm_cache_policy policy;

        /* protects everything */
        struct mutex lock;
        dm_cblock_t cache_size;
        struct io_tracker tracker;

        /*
         * Entries come from two pools, one of pre-cache entries, and one
         * for the cache proper.
         */
        struct entry_pool pre_cache_pool;
        struct entry_pool cache_pool;

        /*
         * We maintain three queues of entries.  The cache proper,
         * consisting of a clean and dirty queue, contains the currently
         * active mappings, whereas the pre_cache tracks blocks that are
         * being hit frequently and are potential candidates for promotion
         * to the cache.
         */
        struct queue pre_cache;
        struct queue cache_clean;
        struct queue cache_dirty;

        /*
         * Keeps track of time, incremented by the core.  We use this to
         * avoid attributing multiple hits within the same tick.
         *
         * Access to tick_protected should be done with the spin lock held.
         * It's copied to tick at the start of the map function (within the
         * mutex).
         */
        spinlock_t tick_lock;
        unsigned tick_protected;
        unsigned tick;

        /*
         * A count of the number of times the map function has been called
         * and found an entry in the pre_cache or cache.  Currently used to
         * calculate the generation.
         */
        unsigned hit_count;

        /*
         * A generation is a longish period that is used to trigger some
         * book keeping effects.  eg, decrementing hit counts on entries.
         * This is needed to allow the cache to evolve as io patterns
         * change.
         */
        unsigned generation;
        unsigned generation_period; /* in lookups (will probably change) */

        unsigned discard_promote_adjustment;
        unsigned read_promote_adjustment;
        unsigned write_promote_adjustment;

        /*
         * The hash table allows us to quickly find an entry by origin
         * block.  Both pre_cache and cache entries are in here.
         */
        unsigned nr_buckets;
        dm_block_t hash_bits;
        struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

        hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
        unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
        struct hlist_head *bucket = mq->table + h;
        struct entry *e;

        hlist_for_each_entry(e, bucket, hlist)
                if (e->oblock == oblock) {
                        hlist_del(&e->hlist);
                        hlist_add_head(&e->hlist, bucket);
                        return e;
                }

        return NULL;
}

static void hash_remove(struct entry *e)
{
        hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
        return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
        return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and when to move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
        return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
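
/*
 * So an entry with a hit count in [2^k, 2^(k + 1)) sits at level k;
 * eg, a count of 1 maps to level 0, 2-3 to level 1, 4-7 to level 2,
 * capped at level 15.
 */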

static bool in_cache(struct mq_policy *mq, struct entry *e)
{
        return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
        hash_insert(mq, e);

        if (in_cache(mq, e))
                queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
                           queue_level(e), &e->list);
        else
                queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
        if (in_cache(mq, e))
                queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
        else
                queue_remove(&mq->pre_cache, &e->list);

        hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
        struct entry *e;
        struct list_head *h = queue_pop(q);

        if (!h)
                return NULL;

        e = container_of(h, struct entry, list);
        hash_remove(e);

        return e;
}

static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
{
        struct entry *e;
        struct list_head *h = queue_pop_old(q);

        if (!h)
                return NULL;

        e = container_of(h, struct entry, list);
        hash_remove(e);

        return e;
}

static struct entry *peek(struct queue *q)
{
        struct list_head *h = queue_peek(q);
        return h ? container_of(h, struct entry, list) : NULL;
}

/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
        unsigned total = 0, nr = 0, count = 0, level;
        struct list_head *head;
        struct entry *e;

        if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
                mq->hit_count = 0;
                mq->generation++;

                for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
                        head = mq->cache_clean.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }

                        head = mq->cache_dirty.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }
                }
        }
}

/*
 * Whenever we use an entry we push it to the back of its current level.
 * The hit count itself is bumped when the tick is processed (see
 * copy_tick/update_cache_hits below).
 */
static void requeue(struct mq_policy *mq, struct entry *e)
{
        check_generation(mq);
        del(mq, e);
        push(mq, e);
}

/*
 * Demote the least recently used clean entry from the cache, returning
 * the origin block it was mapped to in *oblock.
 *
 * Currently the demoted entry is simply forgotten rather than being
 * re-inserted into the pre_cache.  There are various options here, and
 * more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
        struct entry *demoted = pop(mq, &mq->cache_clean);

        if (!demoted)
                /*
                 * We could get a block from mq->cache_dirty, but that
                 * would add extra latency to the triggering bio as it
                 * waits for the writeback.  Better to not promote this
                 * time and hope there's a clean block next time this block
                 * is hit.
                 */
                return -ENOSPC;

        *oblock = demoted->oblock;
        free_entry(&mq->cache_pool, demoted);

        /*
         * We used to put the demoted block into the pre-cache, but I think
         * it's simpler to just let it work its way up from zero again.
         * This stops blocks from flickering in and out of the cache.
         */

        return 0;
}

/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper.  Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
        struct entry *e;

        if (any_free_cblocks(mq))
                return 0;

        e = peek(&mq->cache_clean);
        if (e)
                return e->hit_count;

        e = peek(&mq->cache_dirty);
        if (e)
                return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

        /* This should never happen */
        return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
                                           bool discarded_oblock, int data_dir)
{
        if (data_dir == READ)
                return promote_threshold(mq) + mq->read_promote_adjustment;

        if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
                /*
                 * We don't need to do any copying at all, so give this a
                 * very low threshold.
                 */
                return mq->discard_promote_adjustment;
        }

        return promote_threshold(mq) + mq->write_promote_adjustment;
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
                           bool discarded_oblock, int data_dir)
{
        return e->hit_count >=
                adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
                             struct entry *e,
                             struct policy_result *result)
{
        requeue(mq, e);

        if (in_cache(mq, e)) {
                result->op = POLICY_HIT;
                result->cblock = infer_cblock(&mq->cache_pool, e);
        }

        return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
                              struct policy_result *result)
{
        int r;
        struct entry *new_e;

        /* Ensure there's a free cblock in the cache */
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                if (r) {
                        result->op = POLICY_MISS;
                        return 0;
                }
        } else
                result->op = POLICY_NEW;

        new_e = alloc_entry(&mq->cache_pool);
        BUG_ON(!new_e);

        new_e->oblock = e->oblock;
        new_e->dirty = false;
        new_e->hit_count = e->hit_count;

        del(mq, e);
        free_entry(&mq->pre_cache_pool, e);
        push(mq, new_e);

        result->cblock = infer_cblock(&mq->cache_pool, new_e);

        return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
                                 bool can_migrate, bool discarded_oblock,
                                 int data_dir, struct policy_result *result)
{
        int r = 0;

        if (!should_promote(mq, e, discarded_oblock, data_dir)) {
                requeue(mq, e);
                result->op = POLICY_MISS;

        } else if (!can_migrate)
                r = -EWOULDBLOCK;

        else {
                requeue(mq, e);
                r = pre_cache_to_cache(mq, e, result);
        }

        return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
                                dm_oblock_t oblock)
{
        struct entry *e = alloc_entry(&mq->pre_cache_pool);

        if (!e)
                /*
                 * There's no spare entry structure, so we grab the least
                 * used one from the pre_cache.
                 */
                e = pop(mq, &mq->pre_cache);

        if (unlikely(!e)) {
                DMWARN("couldn't pop from pre cache");
                return;
        }

        e->dirty = false;
        e->oblock = oblock;
        e->hit_count = 1;
        push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
                            struct policy_result *result)
{
        int r;
        struct entry *e;

        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                if (unlikely(r)) {
                        result->op = POLICY_MISS;
                        insert_in_pre_cache(mq, oblock);
                        return;
                }

                /*
                 * This will always succeed, since we've just demoted.
                 */
                e = alloc_entry(&mq->cache_pool);
                BUG_ON(!e);

        } else {
                e = alloc_entry(&mq->cache_pool);
                result->op = POLICY_NEW;
        }

        e->oblock = oblock;
        e->dirty = false;
        e->hit_count = 1;
        push(mq, e);

        result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
                          bool can_migrate, bool discarded_oblock,
                          int data_dir, struct policy_result *result)
{
        if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
                if (can_migrate)
                        insert_in_cache(mq, oblock, result);
                else
                        return -EWOULDBLOCK;
        } else {
                insert_in_pre_cache(mq, oblock);
                result->op = POLICY_MISS;
        }

        return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put it
 * in the pre_cache, the cache, etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
               bool can_migrate, bool discarded_oblock,
               int data_dir, struct policy_result *result)
{
        int r = 0;
        struct entry *e = hash_lookup(mq, oblock);

        if (e && in_cache(mq, e))
                r = cache_entry_found(mq, e, result);

        else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
                 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
                result->op = POLICY_MISS;

        else if (e)
                r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
                                          data_dir, result);

        else
                r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
                                   data_dir, result);

        if (r == -EWOULDBLOCK)
                result->op = POLICY_MISS;

        return r;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);

        vfree(mq->table);
        epool_exit(&mq->cache_pool);
        epool_exit(&mq->pre_cache_pool);
        kfree(mq);
}

static void update_pre_cache_hits(struct list_head *h, void *context)
{
        struct entry *e = container_of(h, struct entry, list);
        e->hit_count++;
}

static void update_cache_hits(struct list_head *h, void *context)
{
        struct mq_policy *mq = context;
        struct entry *e = container_of(h, struct entry, list);
        e->hit_count++;
        mq->hit_count++;
}

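/*
 * Called under the mutex at the start of each map.  Any ticks recorded
 * by mq_tick() since the last copy are processed here: the hit counts of
 * entries queued since the previous tick are bumped, and the tick
 * sentinels are requeued.
 */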
static void copy_tick(struct mq_policy *mq)
{
        unsigned long flags, tick;

        spin_lock_irqsave(&mq->tick_lock, flags);
        tick = mq->tick_protected;
        if (tick != mq->tick) {
                queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
                queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
                queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
                mq->tick = tick;
        }

        queue_tick(&mq->pre_cache);
        queue_tick(&mq->cache_dirty);
        queue_tick(&mq->cache_clean);
        queue_update_writeback_sentinels(&mq->cache_dirty);
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        result->op = POLICY_MISS;

        if (can_block)
                mutex_lock(&mq->lock);
        else if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        copy_tick(mq);

        iot_examine_bio(&mq->tracker, bio);
        r = map(mq, oblock, can_migrate, discarded_oblock,
                bio_data_dir(bio), result);

        mutex_unlock(&mq->lock);

        return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);
        struct entry *e;

        if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        e = hash_lookup(mq, oblock);
        if (e && in_cache(mq, e)) {
                *cblock = infer_cblock(&mq->cache_pool, e);
                r = 0;
        } else
                r = -ENOENT;

        mutex_unlock(&mq->lock);

        return r;
}

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
        struct entry *e;

        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        del(mq, e);
        e->dirty = set;
        push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, true);
        mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, false);
        mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
{
        struct mq_policy *mq = to_mq_policy(p);
        struct entry *e;

        e = alloc_particular_entry(&mq->cache_pool, cblock);
        e->oblock = oblock;
        e->dirty = false;       /* this gets corrected in a minute */
        e->hit_count = hint_valid ? hint : 1;
        push(mq, e);

        return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
                         policy_walk_fn fn, void *context)
{
        int r;
        unsigned level;
        struct list_head *h;
        struct entry *e;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each(h, q->qs + level) {
                        if (is_sentinel(q, h))
                                continue;

                        e = container_of(h, struct entry, list);
                        r = fn(context, infer_cblock(&mq->cache_pool, e),
                               e->oblock, e->hit_count);
                        if (r)
                                return r;
                }

        return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
                            void *context)
{
        struct mq_policy *mq = to_mq_policy(p);
        int r = 0;

        mutex_lock(&mq->lock);

        r = mq_save_hints(mq, &mq->cache_clean, fn, context);
        if (!r)
                r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

        mutex_unlock(&mq->lock);

        return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
        struct entry *e;

        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        del(mq, e);
        free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __remove_mapping(mq, oblock);
        mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        struct entry *e = epool_find(&mq->cache_pool, cblock);

        if (!e)
                return -ENODATA;

        del(mq, e);
        free_entry(&mq->cache_pool, e);

        return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __remove_cblock(mq, cblock);
        mutex_unlock(&mq->lock);

        return r;
}

#define CLEAN_TARGET_PERCENTAGE 25

static bool clean_target_met(struct mq_policy *mq)
{
        /*
         * Cache entries may not all be populated, so we cannot rely on
         * the size of the clean queue.
         */
        unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
        unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;

        return nr_clean >= target;
}

static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
                               dm_cblock_t *cblock)
{
        struct entry *e = pop_old(mq, &mq->cache_dirty);

        if (!e && !clean_target_met(mq))
                e = pop(mq, &mq->cache_dirty);

        if (!e)
                return -ENODATA;

        *oblock = e->oblock;
        *cblock = infer_cblock(&mq->cache_pool, e);
        e->dirty = false;
        push(mq, e);

        return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
                             dm_cblock_t *cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __mq_writeback_work(mq, oblock, cblock);
        mutex_unlock(&mq->lock);

        return r;
}

static void __force_mapping(struct mq_policy *mq,
                            dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct entry *e = hash_lookup(mq, current_oblock);

        if (e && in_cache(mq, e)) {
                del(mq, e);
                e->oblock = new_oblock;
                e->dirty = true;
                push(mq, e);
        }
}

static void mq_force_mapping(struct dm_cache_policy *p,
                             dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __force_mapping(mq, current_oblock, new_oblock);
        mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
        dm_cblock_t r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = to_cblock(mq->cache_pool.nr_allocated);
        mutex_unlock(&mq->lock);

        return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick_protected++;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}

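/*
 * The tunables below can be changed at runtime via the message
 * interface.  For example (assuming a cache device named 'cache'):
 *
 *   dmsetup message cache 0 sequential_threshold 1024
 *
 * would raise the sequential threshold to 1024.
 */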
static int mq_set_config_value(struct dm_cache_policy *p,
                               const char *key, const char *value)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long tmp;

        if (kstrtoul(value, 10, &tmp))
                return -EINVAL;

        if (!strcasecmp(key, "random_threshold")) {
                mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

        } else if (!strcasecmp(key, "sequential_threshold")) {
                mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

        } else if (!strcasecmp(key, "discard_promote_adjustment"))
                mq->discard_promote_adjustment = tmp;

        else if (!strcasecmp(key, "read_promote_adjustment"))
                mq->read_promote_adjustment = tmp;

        else if (!strcasecmp(key, "write_promote_adjustment"))
                mq->write_promote_adjustment = tmp;

        else
                return -EINVAL;

        return 0;
}

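/*
 * The leading "10" in the status line below is the number of words that
 * follow: five key/value pairs.
 */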
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
        ssize_t sz = 0;
        struct mq_policy *mq = to_mq_policy(p);

        DMEMIT("10 random_threshold %u "
               "sequential_threshold %u "
               "discard_promote_adjustment %u "
               "read_promote_adjustment %u "
               "write_promote_adjustment %u",
               mq->tracker.thresholds[PATTERN_RANDOM],
               mq->tracker.thresholds[PATTERN_SEQUENTIAL],
               mq->discard_promote_adjustment,
               mq->read_promote_adjustment,
               mq->write_promote_adjustment);

        return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
        mq->policy.destroy = mq_destroy;
        mq->policy.map = mq_map;
        mq->policy.lookup = mq_lookup;
        mq->policy.set_dirty = mq_set_dirty;
        mq->policy.clear_dirty = mq_clear_dirty;
        mq->policy.load_mapping = mq_load_mapping;
        mq->policy.walk_mappings = mq_walk_mappings;
        mq->policy.remove_mapping = mq_remove_mapping;
        mq->policy.remove_cblock = mq_remove_cblock;
        mq->policy.writeback_work = mq_writeback_work;
        mq->policy.force_mapping = mq_force_mapping;
        mq->policy.residency = mq_residency;
        mq->policy.tick = mq_tick;
        mq->policy.emit_config_values = mq_emit_config_values;
        mq->policy.set_config_value = mq_set_config_value;
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        if (!mq)
                return NULL;

        init_policy_functions(mq);
        iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
        mq->cache_size = cache_size;

        if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of pre-cache entries");
                goto bad_pre_cache_init;
        }

        if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of cache entries");
                goto bad_cache_init;
        }

        mq->tick_protected = 0;
        mq->tick = 0;
        mq->hit_count = 0;
        mq->generation = 0;
        mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
        mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
        mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
        mutex_init(&mq->lock);
        spin_lock_init(&mq->tick_lock);

        queue_init(&mq->pre_cache);
        queue_init(&mq->cache_clean);
        queue_init(&mq->cache_dirty);

        mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

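        /*
         * Size the hash table at roughly half the cache size, rounded up
         * to a power of two (minimum 16 buckets); hash_bits is then just
         * log2 of nr_buckets.
         */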
        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
        mq->hash_bits = ffs(mq->nr_buckets) - 1;
        mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
        if (!mq->table)
                goto bad_alloc_table;

        return &mq->policy;

bad_alloc_table:
        epool_exit(&mq->cache_pool);
bad_cache_init:
        epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
        kfree(mq);

        return NULL;
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
        .name = "mq",
        .version = {1, 3, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
        .name = "default",
        .version = {1, 3, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create,
        .real = &mq_policy_type
};

static int __init mq_init(void)
{
        int r;

        mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
                                           sizeof(struct entry),
                                           __alignof__(struct entry),
                                           0, NULL);
        if (!mq_entry_cache)
                goto bad;

        r = dm_cache_policy_register(&mq_policy_type);
        if (r) {
                DMERR("register failed %d", r);
                goto bad_register_mq;
        }

        r = dm_cache_policy_register(&default_policy_type);
        if (!r) {
                DMINFO("version %u.%u.%u loaded",
                       mq_policy_type.version[0],
                       mq_policy_type.version[1],
                       mq_policy_type.version[2]);
                return 0;
        }

        DMERR("register failed (as default) %d", r);

        dm_cache_policy_unregister(&mq_policy_type);
bad_register_mq:
        kmem_cache_destroy(mq_entry_cache);
bad:
        return -ENOMEM;
}

static void __exit mq_exit(void)
{
        dm_cache_policy_unregister(&mq_policy_type);
        dm_cache_policy_unregister(&default_policy_type);

        kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");