/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

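/*
 * Rounds n up to a power of two, but no lower than min.  Used below to
 * size the hash table so that hash_64() can index it with hash_bits bits.
 */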
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds for switching between random and sequential io mode
 * default as follows, and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/

/*
 * This queue is divided into levels, allowing us to push entries to the
 * back of any level.  Think of it as a partially sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
#define NR_SENTINELS (NR_QUEUE_LEVELS * 3)

struct queue {
	unsigned nr_elts;
	struct list_head qs[NR_QUEUE_LEVELS];
	struct list_head sentinels[NR_SENTINELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	q->nr_elts = 0;
	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		INIT_LIST_HEAD(q->qs + i);
		INIT_LIST_HEAD(q->sentinels + i);
	}
}

static bool queue_empty(struct queue *q)
{
	return q->nr_elts == 0;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	q->nr_elts++;
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct queue *q, struct list_head *elt)
{
	q->nr_elts--;
	list_del(elt);
}

static bool is_sentinel(struct queue *q, struct list_head *h)
{
	return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is empty we search the next one, and so on.
 */
static struct list_head *queue_peek(struct queue *q)
{
	unsigned level;
	struct list_head *h;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level)
			if (!is_sentinel(q, h))
				return h;

	return NULL;
}

static struct list_head *queue_pop(struct queue *q)
{
	struct list_head *r = queue_peek(q);

	if (r) {
		q->nr_elts--;
		list_del(r);
	}

	return r;
}

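/*
 * Pops the first element off a plain list (used for the entry pool free
 * lists, which aren't struct queues).
 */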
static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*
 * Sometimes we want to iterate through entries that have been pushed since
 * a certain event.  We use sentinel entries on the queues to delimit these
 * 'tick' events.
 */
static void queue_tick(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		list_del(q->sentinels + i);
		list_add_tail(q->sentinels + i, q->qs + i);
	}
}

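/*
 * Calls fn on every entry pushed to a level since that level's sentinel
 * was last moved to its back, ie. everything touched during the current
 * tick.  Walks from the newest entry backwards until a sentinel is met.
 */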
typedef void (*iter_fn)(struct list_head *, void *);
static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
{
	unsigned i;
	struct list_head *h;

	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		list_for_each_prev(h, q->qs + i) {
			if (is_sentinel(q, h))
				break;

			fn(h, context);
		}
	}
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);
	return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper, which
	 * consists of a clean and a dirty queue, contains the currently
	 * active mappings.  The pre_cache tracks blocks that are being
	 * hit frequently and are potential candidates for promotion to
	 * the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * bookkeeping effects, eg. decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	unsigned discard_promote_adjustment;
	unsigned read_promote_adjustment;
	unsigned write_promote_adjustment;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

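/*
 * Looks up an entry by origin block.  On a hit the entry is moved to the
 * front of its bucket so that hot entries are found quickly.
 */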
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}

static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache proper (the choice
 * follows from which pool the entry was allocated from).  Inserts into
 * the hash table.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	if (in_cache(mq, e))
		queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
	else
		queue_remove(&mq->pre_cache, &e->list);

	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

static struct entry *peek(struct queue *q)
{
	struct list_head *h = queue_peek(q);
	return h ? container_of(h, struct entry, list) : NULL;
}

/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * The threshold is now derived from the hit counts of the oldest entries
 * in the cache (see promote_threshold() below).  check_generation() still
 * samples the hit counts of the first 20 entries across all levels
 * (preferring clean entries at each level), though the computed average
 * is currently unused.
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}
	}
}

/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.  (The actual hit count increment happens at
 * tick time, via the sentinels in copy_tick().)
 */
static void requeue(struct mq_policy *mq, struct entry *e)
{
	check_generation(mq);
	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used clean entry from the cache, returning
 * the origin block it was mapped to via *oblock.  Returns -ENOSPC if
 * only dirty entries remain.
 *
 * There are various options for what to do with the demoted entry, and
 * more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache) -- this is what we do now.
 * - insert it into the pre_cache with a divided hit count rather than
 *   setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}

/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper.  Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
	struct entry *e;

	if (any_free_cblocks(mq))
		return 0;

	e = peek(&mq->cache_clean);
	if (e)
		return e->hit_count;

	e = peek(&mq->cache_dirty);
	if (e)
		return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

	/* This should never happen */
	return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return promote_threshold(mq) + mq->read_promote_adjustment;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return mq->discard_promote_adjustment;
	}

	return promote_threshold(mq) + mq->write_promote_adjustment;
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}
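
/*
 * Worked example with the default adjustments: if the cache is full and
 * its oldest clean entry has a hit count of 10, a read must reach
 * 10 + 4 = 14 hits in the pre_cache to be promoted, a write 10 + 8 = 18,
 * and a write to a discarded block (with clean blocks available) just 1.
 */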

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}
	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;

	if (!should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue(mq, e);
		result->op = POLICY_MISS;

	} else if (!can_migrate)
		r = -EWOULDBLOCK;

	else {
		requeue(mq, e);
		r = pre_cache_to_cache(mq, e, result);
	}

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put it
 * in the pre_cache or the cache proper.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
		 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	vfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

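/*
 * Per-entry callbacks for queue_iterate_tick().  Cache hits additionally
 * feed mq->hit_count, which drives the generation bookkeeping in
 * check_generation().
 */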
static void update_pre_cache_hits(struct list_head *h, void *context)
{
	struct entry *e = container_of(h, struct entry, list);
	e->hit_count++;
}

static void update_cache_hits(struct list_head *h, void *context)
{
	struct mq_policy *mq = context;
	struct entry *e = container_of(h, struct entry, list);
	e->hit_count++;
	mq->hit_count++;
}

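/*
 * Called at the start of the map function.  When the core has advanced
 * the tick, walk the entries delimited by the sentinels and credit their
 * hits, so that many accesses within a single tick aren't over-counted.
 */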
static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags, tick;

	spin_lock_irqsave(&mq->tick_lock, flags);
	tick = mq->tick_protected;
	if (tick != mq->tick) {
		queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
		queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
		queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
		mq->tick = tick;
	}

	queue_tick(&mq->pre_cache);
	queue_tick(&mq->cache_dirty);
	queue_tick(&mq->cache_clean);
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	push(mq, e);

	return 0;
}

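/*
 * Walks one of the cache queues, passing each real entry (sentinels are
 * skipped) to fn so the core can persist its hit count as the on-disk
 * hint.
 */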
static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct list_head *h;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level) {
			if (is_sentinel(q, h))
				continue;

			e = container_of(h, struct entry, list);
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

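/*
 * Hands the oldest dirty entry to the core for writeback: returns its
 * oblock/cblock pair, marks it clean and requeues it.  Returns -ENODATA
 * when nothing is dirty.
 */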
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

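/*
 * The tunables below can be changed at runtime through the dm message
 * interface, eg. (illustrative device name):
 *
 *   dmsetup message <cache-dev> 0 sequential_threshold 1024
 *   dmsetup message <cache-dev> 0 write_promote_adjustment 4
 */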
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold")) {
		mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

	} else if (!strcasecmp(key, "sequential_threshold")) {
		mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

	} else if (!strcasecmp(key, "discard_promote_adjustment"))
		mq->discard_promote_adjustment = tmp;

	else if (!strcasecmp(key, "read_promote_adjustment"))
		mq->read_promote_adjustment = tmp;

	else if (!strcasecmp(key, "write_promote_adjustment"))
		mq->write_promote_adjustment = tmp;

	else
		return -EINVAL;

	return 0;
}

static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("10 random_threshold %u "
	       "sequential_threshold %u "
	       "discard_promote_adjustment %u "
	       "read_promote_adjustment %u "
	       "write_promote_adjustment %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL],
	       mq->discard_promote_adjustment,
	       mq->read_promote_adjustment,
	       mq->write_promote_adjustment);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

1258 | static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, | |
1259 | sector_t origin_size, | |
1260 | sector_t cache_block_size) | |
1261 | { | |
f2836352 JT |
1262 | struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); |
1263 | ||
1264 | if (!mq) | |
1265 | return NULL; | |
1266 | ||
1267 | init_policy_functions(mq); | |
1268 | iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT); | |
f2836352 | 1269 | mq->cache_size = cache_size; |
633618e3 JT |
1270 | |
1271 | if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) { | |
1272 | DMERR("couldn't initialize pool of pre-cache entries"); | |
1273 | goto bad_pre_cache_init; | |
1274 | } | |
1275 | ||
1276 | if (epool_init(&mq->cache_pool, from_cblock(cache_size))) { | |
1277 | DMERR("couldn't initialize pool of cache entries"); | |
1278 | goto bad_cache_init; | |
1279 | } | |
1280 | ||
f2836352 JT |
1281 | mq->tick_protected = 0; |
1282 | mq->tick = 0; | |
1283 | mq->hit_count = 0; | |
1284 | mq->generation = 0; | |
78e03d69 JT |
1285 | mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT; |
1286 | mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT; | |
1287 | mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT; | |
f2836352 JT |
1288 | mutex_init(&mq->lock); |
1289 | spin_lock_init(&mq->tick_lock); | |
f2836352 JT |
1290 | |
1291 | queue_init(&mq->pre_cache); | |
01911c19 JT |
1292 | queue_init(&mq->cache_clean); |
1293 | queue_init(&mq->cache_dirty); | |
1294 | ||
f2836352 JT |
1295 | mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U); |
1296 | ||
f2836352 JT |
1297 | mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); |
1298 | mq->hash_bits = ffs(mq->nr_buckets) - 1; | |
14f398ca | 1299 | mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); |
f2836352 JT |
1300 | if (!mq->table) |
1301 | goto bad_alloc_table; | |
1302 | ||
f2836352 JT |
1303 | return &mq->policy; |
1304 | ||
f2836352 | 1305 | bad_alloc_table: |
633618e3 JT |
1306 | epool_exit(&mq->cache_pool); |
1307 | bad_cache_init: | |
1308 | epool_exit(&mq->pre_cache_pool); | |
1309 | bad_pre_cache_init: | |
f2836352 JT |
1310 | kfree(mq); |
1311 | ||
1312 | return NULL; | |
1313 | } | |
1314 | ||
1315 | /*----------------------------------------------------------------*/ | |
1316 | ||
1317 | static struct dm_cache_policy_type mq_policy_type = { | |
1318 | .name = "mq", | |
f1afb36a | 1319 | .version = {1, 3, 0}, |
f2836352 JT |
1320 | .hint_size = 4, |
1321 | .owner = THIS_MODULE, | |
1322 | .create = mq_create | |
1323 | }; | |
1324 | ||
1325 | static struct dm_cache_policy_type default_policy_type = { | |
1326 | .name = "default", | |
f1afb36a | 1327 | .version = {1, 3, 0}, |
f2836352 JT |
1328 | .hint_size = 4, |
1329 | .owner = THIS_MODULE, | |
2e68c4e6 MS |
1330 | .create = mq_create, |
1331 | .real = &mq_policy_type | |
f2836352 JT |
1332 | }; |
1333 | ||
1334 | static int __init mq_init(void) | |
1335 | { | |
1336 | int r; | |
1337 | ||
1338 | mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry", | |
1339 | sizeof(struct entry), | |
1340 | __alignof__(struct entry), | |
1341 | 0, NULL); | |
1342 | if (!mq_entry_cache) | |
1343 | goto bad; | |
1344 | ||
1345 | r = dm_cache_policy_register(&mq_policy_type); | |
1346 | if (r) { | |
1347 | DMERR("register failed %d", r); | |
1348 | goto bad_register_mq; | |
1349 | } | |
1350 | ||
1351 | r = dm_cache_policy_register(&default_policy_type); | |
1352 | if (!r) { | |
4e7f506f MS |
1353 | DMINFO("version %u.%u.%u loaded", |
1354 | mq_policy_type.version[0], | |
1355 | mq_policy_type.version[1], | |
1356 | mq_policy_type.version[2]); | |
f2836352 JT |
1357 | return 0; |
1358 | } | |
1359 | ||
1360 | DMERR("register failed (as default) %d", r); | |
1361 | ||
1362 | dm_cache_policy_unregister(&mq_policy_type); | |
1363 | bad_register_mq: | |
1364 | kmem_cache_destroy(mq_entry_cache); | |
1365 | bad: | |
1366 | return -ENOMEM; | |
1367 | } | |
1368 | ||
1369 | static void __exit mq_exit(void) | |
1370 | { | |
1371 | dm_cache_policy_unregister(&mq_policy_type); | |
1372 | dm_cache_policy_unregister(&default_policy_type); | |
1373 | ||
1374 | kmem_cache_destroy(mq_entry_cache); | |
1375 | } | |
1376 | ||
1377 | module_init(mq_init); | |
1378 | module_exit(mq_exit); | |
1379 | ||
1380 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | |
1381 | MODULE_LICENSE("GPL"); | |
1382 | MODULE_DESCRIPTION("mq cache policy"); | |
1383 | ||
1384 | MODULE_ALIAS("dm-cache-default"); |