drivers/staging/zcache/zcache-main.c
1 /*
2 * zcache-main.c
3 *
4 * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
5 * Copyright (c) 2010,2011, Nitin Gupta
6 *
7 * Zcache provides an in-kernel "host implementation" for transcendent memory
8 * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
9 * lzo1x compression to improve density and an embedded allocator called
10 * "zbud" which "buddies" two compressed pages semi-optimally in each physical
11 * pageframe. Zbud is integrally tied into tmem to allow pageframes to
12 * be "reclaimed" efficiently.
13 */
14
15 #include <linux/module.h>
16 #include <linux/cpu.h>
17 #include <linux/highmem.h>
18 #include <linux/list.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/atomic.h>
24 #include <linux/math64.h>
25 #include <linux/crypto.h>
26 #include <linux/swap.h>
27 #include <linux/swapops.h>
28 #include <linux/pagemap.h>
29 #include <linux/writeback.h>
30
31 #include <linux/cleancache.h>
32 #include <linux/frontswap.h>
33 #include "tmem.h"
34 #include "zcache.h"
35 #include "zbud.h"
36 #include "ramster.h"
37 #include "debug.h"
38 #ifdef CONFIG_RAMSTER
39 static bool ramster_enabled __read_mostly;
40 #else
41 #define ramster_enabled false
42 #endif
43
44 #ifndef __PG_WAS_ACTIVE
45 static inline bool PageWasActive(struct page *page)
46 {
47 return true;
48 }
49
50 static inline void SetPageWasActive(struct page *page)
51 {
52 }
53 #endif
54
55 #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
56 static bool frontswap_has_exclusive_gets __read_mostly = true;
57 #else
58 static bool frontswap_has_exclusive_gets __read_mostly;
59 static inline void frontswap_tmem_exclusive_gets(bool b)
60 {
61 }
62 #endif
63
64 /*
65 * Mark the pampd with a special sentinel value so that a later
66 * retrieval can identify zero-filled pages.
67 */
68 #define ZERO_FILLED 0x2
69
70 /* enable (or fix code) when Seth's patches are accepted upstream */
71 #define zcache_writeback_enabled 0
72
73 static bool zcache_enabled __read_mostly;
74 static bool disable_cleancache __read_mostly;
75 static bool disable_frontswap __read_mostly;
76 static bool disable_frontswap_ignore_nonactive __read_mostly;
77 static bool disable_cleancache_ignore_nonactive __read_mostly;
78 static char *namestr __read_mostly = "zcache";
79
80 #define ZCACHE_GFP_MASK \
81 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
82
83 /* crypto API for zcache */
84 #define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
85 static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
86 static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
87
88 enum comp_op {
89 ZCACHE_COMPOP_COMPRESS,
90 ZCACHE_COMPOP_DECOMPRESS
91 };
92
93 static inline int zcache_comp_op(enum comp_op op,
94 const u8 *src, unsigned int slen,
95 u8 *dst, unsigned int *dlen)
96 {
97 struct crypto_comp *tfm;
98 int ret = -1;
99
100 BUG_ON(!zcache_comp_pcpu_tfms);
101 tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
102 BUG_ON(!tfm);
103 switch (op) {
104 case ZCACHE_COMPOP_COMPRESS:
105 ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
106 break;
107 case ZCACHE_COMPOP_DECOMPRESS:
108 ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
109 break;
110 default:
111 ret = -EINVAL;
112 }
113 put_cpu();
114 return ret;
115 }
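/*
 * Usage sketch (illustrative, mirroring zcache_compress() below):
 * compressing one kmapped page into the per-cpu destination buffer
 * would look like
 *
 *	unsigned int dlen = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
 *	int err = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va,
 *				 PAGE_SIZE, dmem, &dlen);
 *
 * where a zero return means success and dlen then holds the
 * compressed size.
 */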
116
117 /*
118 * policy parameters
119 */
120
121 /*
122 * byte count defining poor compression; pages with greater zsize will be
123 * rejected
124 */
125 static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
126 /*
127 * byte count defining poor *mean* compression; pages with greater zsize
128 * will be rejected until sufficient better-compressed pages are accepted
129 * driving the mean below this threshold
130 */
131 static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
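/*
 * Worked example (assuming 4K pages): zbud_max_zsize is 3584 and
 * zbud_max_mean_zsize is 2560.  A persistent zpage compressing to more
 * than 3584 bytes is always rejected; one compressing to, say, 3000
 * bytes is accepted only while the running mean
 * (zcache_pers_zbytes / zcache_pers_zpages) remains at or below 2560,
 * as enforced in zcache_pampd_pers_create() below.
 */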
132
133 /*
134 * for now, use named slabs so we can easily track usage; later we can
135 * either just use kmalloc, or perhaps add a slab-like allocator
136 * to more carefully manage total memory utilization
137 */
138 static struct kmem_cache *zcache_objnode_cache;
139 static struct kmem_cache *zcache_obj_cache;
140
141 static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
142
143 /* Used by debug.c */
144 ssize_t zcache_pers_zpages;
145 u64 zcache_pers_zbytes;
146 ssize_t zcache_eph_pageframes;
147 ssize_t zcache_pers_pageframes;
148
149 /* Used by this code. */
150 ssize_t zcache_last_active_file_pageframes;
151 ssize_t zcache_last_inactive_file_pageframes;
152 ssize_t zcache_last_active_anon_pageframes;
153 ssize_t zcache_last_inactive_anon_pageframes;
154 #ifdef CONFIG_ZCACHE_WRITEBACK
155 ssize_t zcache_writtenback_pages;
156 ssize_t zcache_outstanding_writeback_pages;
157 #endif
158 /*
159 * zcache core code starts here
160 */
161
162 static struct zcache_client zcache_host;
163 static struct zcache_client zcache_clients[MAX_CLIENTS];
164
165 static inline bool is_local_client(struct zcache_client *cli)
166 {
167 return cli == &zcache_host;
168 }
169
170 static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
171 {
172 struct zcache_client *cli = &zcache_host;
173
174 if (cli_id != LOCAL_CLIENT) {
175 if (cli_id >= MAX_CLIENTS)
176 goto out;
177 cli = &zcache_clients[cli_id];
178 }
179 out:
180 return cli;
181 }
182
183 /*
184 * Tmem operations assume the poolid implies the invoking client.
185 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
186 * RAMster has each client numbered by cluster node, and a KVM version
187 * of zcache would have one client per guest and each client might
188 * have a poolid==N.
189 */
190 struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
191 {
192 struct tmem_pool *pool = NULL;
193 struct zcache_client *cli = NULL;
194
195 cli = zcache_get_client_by_id(cli_id);
196 if (cli == NULL)
197 goto out;
198 if (!is_local_client(cli))
199 atomic_inc(&cli->refcount);
200 if (poolid < MAX_POOLS_PER_CLIENT) {
201 pool = cli->tmem_pools[poolid];
202 if (pool != NULL)
203 atomic_inc(&pool->refcount);
204 }
205 out:
206 return pool;
207 }
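/*
 * Example (hypothetical ids): the local kernel always passes LOCAL_CLIENT
 * and resolves to a pool in zcache_host; a RAMster request from remote
 * client 3 for poolid 2 resolves to zcache_clients[3].tmem_pools[2].
 * Every successful lookup takes references that must be dropped via
 * zcache_put_pool().
 */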
208
209 void zcache_put_pool(struct tmem_pool *pool)
210 {
211 struct zcache_client *cli = NULL;
212
213 if (pool == NULL)
214 BUG();
215 cli = pool->client;
216 atomic_dec(&pool->refcount);
217 if (!is_local_client(cli))
218 atomic_dec(&cli->refcount);
219 }
220
221 int zcache_new_client(uint16_t cli_id)
222 {
223 struct zcache_client *cli;
224 int ret = -1;
225
226 cli = zcache_get_client_by_id(cli_id);
227 if (cli == NULL)
228 goto out;
229 if (cli->allocated)
230 goto out;
231 cli->allocated = 1;
232 ret = 0;
233 out:
234 return ret;
235 }
236
237 /*
238 * zcache implementation for tmem host ops
239 */
240
241 static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
242 {
243 struct tmem_objnode *objnode = NULL;
244 struct zcache_preload *kp;
245 int i;
246
247 kp = &__get_cpu_var(zcache_preloads);
248 for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
249 objnode = kp->objnodes[i];
250 if (objnode != NULL) {
251 kp->objnodes[i] = NULL;
252 break;
253 }
254 }
255 BUG_ON(objnode == NULL);
256 inc_zcache_objnode_count();
257 return objnode;
258 }
259
260 static void zcache_objnode_free(struct tmem_objnode *objnode,
261 struct tmem_pool *pool)
262 {
263 dec_zcache_objnode_count();
264 kmem_cache_free(zcache_objnode_cache, objnode);
265 }
266
267 static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
268 {
269 struct tmem_obj *obj = NULL;
270 struct zcache_preload *kp;
271
272 kp = &__get_cpu_var(zcache_preloads);
273 obj = kp->obj;
274 BUG_ON(obj == NULL);
275 kp->obj = NULL;
276 inc_zcache_obj_count();
277 return obj;
278 }
279
280 static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
281 {
282 dec_zcache_obj_count();
283 kmem_cache_free(zcache_obj_cache, obj);
284 }
285
286 /*
287 * Compressing zero-filled pages would waste memory and introduce
288 * serious fragmentation, so skip compression for them to avoid the overhead.
289 */
290 static bool page_is_zero_filled(struct page *p)
291 {
292 unsigned int pos;
293 char *page;
294
295 page = kmap_atomic(p);
296 for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
297 if (page[pos]) {
298 kunmap_atomic(page);
299 return false;
300 }
301 }
302 kunmap_atomic(page);
303
304 return true;
305 }
306
307 static void handle_zero_filled_page(void *p)
308 {
309 void *user_mem;
310 struct page *page = (struct page *)p;
311
312 user_mem = kmap_atomic(page);
313 memset(user_mem, 0, PAGE_SIZE);
314 kunmap_atomic(user_mem);
315
316 flush_dcache_page(page);
317 }
318
319 static struct tmem_hostops zcache_hostops = {
320 .obj_alloc = zcache_obj_alloc,
321 .obj_free = zcache_obj_free,
322 .objnode_alloc = zcache_objnode_alloc,
323 .objnode_free = zcache_objnode_free,
324 };
325
326 static struct page *zcache_alloc_page(void)
327 {
328 struct page *page = alloc_page(ZCACHE_GFP_MASK);
329
330 if (page != NULL)
331 inc_zcache_pageframes_alloced();
332 return page;
333 }
334
335 static void zcache_free_page(struct page *page)
336 {
337 long curr_pageframes;
338 static long max_pageframes, min_pageframes;
339
340 if (page == NULL)
341 BUG();
342 __free_page(page);
343 inc_zcache_pageframes_freed();
344 curr_pageframes = curr_pageframes_count();
345 if (curr_pageframes > max_pageframes)
346 max_pageframes = curr_pageframes;
347 if (curr_pageframes < min_pageframes)
348 min_pageframes = curr_pageframes;
349 #ifdef CONFIG_ZCACHE_DEBUG
350 if (curr_pageframes > 2L || curr_pageframes < -2L) {
351 /* pr_info here */
352 }
353 #endif
354 }
355
356 /*
357 * zcache implementations for PAM page descriptor ops
358 */
359
360 /* forward reference */
361 static void zcache_compress(struct page *from,
362 void **out_va, unsigned *out_len);
363
364 static struct page *zcache_evict_eph_pageframe(void);
365
366 static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
367 struct tmem_handle *th)
368 {
369 void *pampd = NULL, *cdata = data;
370 unsigned clen = size;
371 bool zero_filled = false;
372 struct page *page = (struct page *)(data), *newpage;
373
374 if (page_is_zero_filled(page)) {
375 clen = 0;
376 zero_filled = true;
377 goto got_pampd;
378 }
379
380 if (!raw) {
381 zcache_compress(page, &cdata, &clen);
382 if (clen > zbud_max_buddy_size()) {
383 inc_zcache_compress_poor();
384 goto out;
385 }
386 } else {
387 BUG_ON(clen > zbud_max_buddy_size());
388 }
389
390 /* look for space via an existing match first */
391 pampd = (void *)zbud_match_prep(th, true, cdata, clen);
392 if (pampd != NULL)
393 goto got_pampd;
394
395 /* no match, now we need to find (or free up) a full page */
396 newpage = zcache_alloc_page();
397 if (newpage != NULL)
398 goto create_in_new_page;
399
400 inc_zcache_failed_getfreepages();
401 /* can't allocate a page, evict an ephemeral page via LRU */
402 newpage = zcache_evict_eph_pageframe();
403 if (newpage == NULL) {
404 inc_zcache_eph_ate_tail_failed();
405 goto out;
406 }
407 inc_zcache_eph_ate_tail();
408
409 create_in_new_page:
410 pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
411 BUG_ON(pampd == NULL);
412 inc_zcache_eph_pageframes();
413
414 got_pampd:
415 inc_zcache_eph_zbytes(clen);
416 inc_zcache_eph_zpages();
417 if (ramster_enabled && raw)
418 ramster_count_foreign_pages(true, 1);
419 if (zero_filled)
420 pampd = (void *)ZERO_FILLED;
421 out:
422 return pampd;
423 }
424
425 static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
426 struct tmem_handle *th)
427 {
428 void *pampd = NULL, *cdata = data;
429 unsigned clen = size;
430 bool zero_filled = false;
431 struct page *page = (struct page *)(data), *newpage;
432 unsigned long zbud_mean_zsize;
433 unsigned long curr_pers_zpages, total_zsize;
434
435 if (data == NULL) {
436 BUG_ON(!ramster_enabled);
437 goto create_pampd;
438 }
439
440 if (page_is_zero_filled(page)) {
441 clen = 0;
442 zero_filled = true;
443 goto got_pampd;
444 }
445
446 curr_pers_zpages = zcache_pers_zpages;
447 /* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
448 if (!raw)
449 zcache_compress(page, &cdata, &clen);
450 /* reject if compression is too poor */
451 if (clen > zbud_max_zsize) {
452 inc_zcache_compress_poor();
453 goto out;
454 }
455 /* reject if mean compression is too poor */
456 if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
457 total_zsize = zcache_pers_zbytes;
458 if ((long)total_zsize < 0)
459 total_zsize = 0;
460 zbud_mean_zsize = div_u64(total_zsize,
461 curr_pers_zpages);
462 if (zbud_mean_zsize > zbud_max_mean_zsize) {
463 inc_zcache_mean_compress_poor();
464 goto out;
465 }
466 }
467
468 create_pampd:
469 /* look for space via an existing match first */
470 pampd = (void *)zbud_match_prep(th, false, cdata, clen);
471 if (pampd != NULL)
472 goto got_pampd;
473
474 /* no match, now we need to find (or free up) a full page */
475 newpage = zcache_alloc_page();
476 if (newpage != NULL)
477 goto create_in_new_page;
478 /*
479 * FIXME do the following only if eph is oversized?
480 * if (zcache_eph_pageframes >
481 * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
482 * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
483 */
484 inc_zcache_failed_getfreepages();
485 /* can't allocate a page, evict an ephemeral page via LRU */
486 newpage = zcache_evict_eph_pageframe();
487 if (newpage == NULL) {
488 inc_zcache_pers_ate_eph_failed();
489 goto out;
490 }
491 inc_zcache_pers_ate_eph();
492
493 create_in_new_page:
494 pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
495 BUG_ON(pampd == NULL);
496 inc_zcache_pers_pageframes();
497
498 got_pampd:
499 inc_zcache_pers_zpages();
500 inc_zcache_pers_zbytes(clen);
501 if (ramster_enabled && raw)
502 ramster_count_foreign_pages(false, 1);
503 if (zero_filled)
504 pampd = (void *)ZERO_FILLED;
505 out:
506 return pampd;
507 }
508
509 /*
510 * This is called directly from zcache_put_page to pre-allocate space
511 * to store a zpage.
512 */
513 void *zcache_pampd_create(char *data, unsigned int size, bool raw,
514 int eph, struct tmem_handle *th)
515 {
516 void *pampd = NULL;
517 struct zcache_preload *kp;
518 struct tmem_objnode *objnode;
519 struct tmem_obj *obj;
520 int i;
521
522 BUG_ON(!irqs_disabled());
523 /* pre-allocate per-cpu metadata */
524 BUG_ON(zcache_objnode_cache == NULL);
525 BUG_ON(zcache_obj_cache == NULL);
526 kp = &__get_cpu_var(zcache_preloads);
527 for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
528 objnode = kp->objnodes[i];
529 if (objnode == NULL) {
530 objnode = kmem_cache_alloc(zcache_objnode_cache,
531 ZCACHE_GFP_MASK);
532 if (unlikely(objnode == NULL)) {
533 inc_zcache_failed_alloc();
534 goto out;
535 }
536 kp->objnodes[i] = objnode;
537 }
538 }
539 if (kp->obj == NULL) {
540 obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
541 kp->obj = obj;
542 }
543 if (unlikely(kp->obj == NULL)) {
544 inc_zcache_failed_alloc();
545 goto out;
546 }
547 /*
548 * OK, all the metadata is pre-allocated; now handle the data.
549 * Since data allocation depends on whether the page is ephemeral
550 * or persistent, split the call here into different sub-functions.
551 */
552 if (eph)
553 pampd = zcache_pampd_eph_create(data, size, raw, th);
554 else
555 pampd = zcache_pampd_pers_create(data, size, raw, th);
556 out:
557 return pampd;
558 }
559
560 /*
561 * This is a pamops called via tmem_put and is necessary to "finish"
562 * a pampd creation.
563 */
564 void zcache_pampd_create_finish(void *pampd, bool eph)
565 {
566 if (pampd != (void *)ZERO_FILLED)
567 zbud_create_finish((struct zbudref *)pampd, eph);
568 }
569
570 /*
571 * This is passed as a function parameter to zbud_decompress so that
572 * zbud need not be familiar with the details of crypto. It assumes that
573 * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
574 * kmapped. It must be successful, else there is a logic bug somewhere.
575 */
576 static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
577 {
578 int ret;
579 unsigned int outlen = PAGE_SIZE;
580
581 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
582 to_va, &outlen);
583 BUG_ON(ret);
584 BUG_ON(outlen != PAGE_SIZE);
585 }
586
587 /*
588 * Decompress from the kernel va to a pageframe
589 */
590 void zcache_decompress_to_page(char *from_va, unsigned int size,
591 struct page *to_page)
592 {
593 char *to_va = kmap_atomic(to_page);
594 zcache_decompress(from_va, size, to_va);
595 kunmap_atomic(to_va);
596 }
597
598 /*
599 * fill the pageframe corresponding to the struct page with the data
600 * from the passed pampd
601 */
602 static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
603 void *pampd, struct tmem_pool *pool,
604 struct tmem_oid *oid, uint32_t index)
605 {
606 int ret;
607 bool eph = !is_persistent(pool);
608
609 BUG_ON(preemptible());
610 BUG_ON(eph); /* fix later if shared pools get implemented */
611 BUG_ON(pampd_is_remote(pampd));
612
613 if (pampd == (void *)ZERO_FILLED) {
614 handle_zero_filled_page(data);
615 if (!raw)
616 *sizep = PAGE_SIZE;
617 return 0;
618 }
619
620 if (raw)
621 ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
622 sizep, eph);
623 else {
624 ret = zbud_decompress((struct page *)(data),
625 (struct zbudref *)pampd, false,
626 zcache_decompress);
627 *sizep = PAGE_SIZE;
628 }
629 return ret;
630 }
631
632 /*
633 * fill the pageframe corresponding to the struct page with the data
634 * from the passed pampd, then free the pampd and adjust the accounting
635 */
636 static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
637 void *pampd, struct tmem_pool *pool,
638 struct tmem_oid *oid, uint32_t index)
639 {
640 int ret = 0;
641 bool eph = !is_persistent(pool), zero_filled = false;
642 struct page *page = NULL;
643 unsigned int zsize, zpages;
644
645 BUG_ON(preemptible());
646 BUG_ON(pampd_is_remote(pampd));
647
648 if (pampd == (void *)ZERO_FILLED) {
649 handle_zero_filled_page(data);
650 zero_filled = true;
651 zsize = 0;
652 zpages = 1;
653 if (!raw)
654 *sizep = PAGE_SIZE;
655 goto zero_fill;
656 }
657
658 if (raw)
659 ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
660 sizep, eph);
661 else {
662 ret = zbud_decompress((struct page *)(data),
663 (struct zbudref *)pampd, eph,
664 zcache_decompress);
665 *sizep = PAGE_SIZE;
666 }
667 page = zbud_free_and_delist((struct zbudref *)pampd, eph,
668 &zsize, &zpages);
669 zero_fill:
670 if (eph) {
671 if (page)
672 dec_zcache_eph_pageframes();
673 dec_zcache_eph_zpages(zpages);
674 dec_zcache_eph_zbytes(zsize);
675 } else {
676 if (page)
677 dec_zcache_pers_pageframes();
678 dec_zcache_pers_zpages(zpages);
679 dec_zcache_pers_zbytes(zsize);
680 }
681 if (!is_local_client(pool->client))
682 ramster_count_foreign_pages(eph, -1);
683 if (page && !zero_filled)
684 zcache_free_page(page);
685 return ret;
686 }
687
688 /*
689 * free the pampd and remove it from any zcache lists
690 * pampd must no longer be pointed to from any tmem data structures!
691 */
692 static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
693 struct tmem_oid *oid, uint32_t index, bool acct)
694 {
695 struct page *page = NULL;
696 unsigned int zsize, zpages;
697 bool zero_filled = false;
698
699 BUG_ON(preemptible());
700
701 if (pampd == (void *)ZERO_FILLED) {
702 zero_filled = true;
703 zsize = 0;
704 zpages = 1;
705 }
706
707 if (pampd_is_remote(pampd) && !zero_filled) {
708 BUG_ON(!ramster_enabled);
709 pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
710 if (pampd == NULL)
711 return;
712 }
713 if (is_ephemeral(pool)) {
714 if (!zero_filled)
715 page = zbud_free_and_delist((struct zbudref *)pampd,
716 true, &zsize, &zpages);
717 if (page)
718 dec_zcache_eph_pageframes();
719 dec_zcache_eph_zpages(zpages);
720 dec_zcache_eph_zbytes(zsize);
721 /* FIXME CONFIG_RAMSTER... check acct parameter? */
722 } else {
723 if (!zero_filled)
724 page = zbud_free_and_delist((struct zbudref *)pampd,
725 false, &zsize, &zpages);
726 if (page)
727 dec_zcache_pers_pageframes();
728 dec_zcache_pers_zpages(zpages);
729 dec_zcache_pers_zbytes(zsize);
730 }
731 if (!is_local_client(pool->client))
732 ramster_count_foreign_pages(is_ephemeral(pool), -1);
733 if (page && !zero_filled)
734 zcache_free_page(page);
735 }
736
737 static struct tmem_pamops zcache_pamops = {
738 .create_finish = zcache_pampd_create_finish,
739 .get_data = zcache_pampd_get_data,
740 .get_data_and_free = zcache_pampd_get_data_and_free,
741 .free = zcache_pampd_free,
742 };
743
744 /*
745 * zcache compression/decompression and related per-cpu stuff
746 */
747
748 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
749 #define ZCACHE_DSTMEM_ORDER 1
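/*
 * Note: zcache_dstmem is an order-1 (two page) buffer and zcache_compress()
 * below advertises an output capacity of PAGE_SIZE << ZCACHE_DSTMEM_ORDER
 * (8K with 4K pages), so even a page whose "compressed" form expands
 * still fits.
 */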
750
751 static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
752 {
753 int ret;
754 unsigned char *dmem = __get_cpu_var(zcache_dstmem);
755 char *from_va;
756
757 BUG_ON(!irqs_disabled());
758 /* no buffer or no compressor so can't compress */
759 BUG_ON(dmem == NULL);
760 *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
761 from_va = kmap_atomic(from);
762 mb();
763 ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
764 out_len);
765 BUG_ON(ret);
766 *out_va = dmem;
767 kunmap_atomic(from_va);
768 }
769
770 static int zcache_comp_cpu_up(int cpu)
771 {
772 struct crypto_comp *tfm;
773
774 tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
775 if (IS_ERR(tfm))
776 return NOTIFY_BAD;
777 *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
778 return NOTIFY_OK;
779 }
780
781 static void zcache_comp_cpu_down(int cpu)
782 {
783 struct crypto_comp *tfm;
784
785 tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
786 crypto_free_comp(tfm);
787 *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
788 }
789
790 static int zcache_cpu_notifier(struct notifier_block *nb,
791 unsigned long action, void *pcpu)
792 {
793 int ret, i, cpu = (long)pcpu;
794 struct zcache_preload *kp;
795
796 switch (action) {
797 case CPU_UP_PREPARE:
798 ret = zcache_comp_cpu_up(cpu);
799 if (ret != NOTIFY_OK) {
800 pr_err("%s: can't allocate compressor xform\n",
801 namestr);
802 return ret;
803 }
804 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
805 GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
806 if (ramster_enabled)
807 ramster_cpu_up(cpu);
808 break;
809 case CPU_DEAD:
810 case CPU_UP_CANCELED:
811 zcache_comp_cpu_down(cpu);
812 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
813 ZCACHE_DSTMEM_ORDER);
814 per_cpu(zcache_dstmem, cpu) = NULL;
815 kp = &per_cpu(zcache_preloads, cpu);
816 for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
817 if (kp->objnodes[i])
818 kmem_cache_free(zcache_objnode_cache,
819 kp->objnodes[i]);
820 }
821 if (kp->obj) {
822 kmem_cache_free(zcache_obj_cache, kp->obj);
823 kp->obj = NULL;
824 }
825 if (ramster_enabled)
826 ramster_cpu_down(cpu);
827 break;
828 default:
829 break;
830 }
831 return NOTIFY_OK;
832 }
833
834 static struct notifier_block zcache_cpu_notifier_block = {
835 .notifier_call = zcache_cpu_notifier
836 };
837
838 /*
839 * The following code interacts with the zbud eviction and zbud
840 * zombify code to access LRU pages
841 */
842
843 static struct page *zcache_evict_eph_pageframe(void)
844 {
845 struct page *page;
846 unsigned int zsize = 0, zpages = 0;
847
848 page = zbud_evict_pageframe_lru(&zsize, &zpages);
849 if (page == NULL)
850 goto out;
851 dec_zcache_eph_zbytes(zsize);
852 dec_zcache_eph_zpages(zpages);
853 inc_zcache_evicted_eph_zpages(zpages);
854 dec_zcache_eph_pageframes();
855 inc_zcache_evicted_eph_pageframes();
856 out:
857 return page;
858 }
859
860 #ifdef CONFIG_ZCACHE_WRITEBACK
861
862 static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);
863
864 static inline void inc_zcache_outstanding_writeback_pages(void)
865 {
866 zcache_outstanding_writeback_pages =
867 atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
868 }
869 static inline void dec_zcache_outstanding_writeback_pages(void)
870 {
871 zcache_outstanding_writeback_pages =
872 atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
873 }
874 static void unswiz(struct tmem_oid oid, u32 index,
875 unsigned *type, pgoff_t *offset);
876
877 /*
878 * Choose an LRU persistent pageframe and attempt to write it back to
879 * the backing swap disk by calling frontswap_writeback on both zpages.
880 *
881 * This is work-in-progress.
882 */
883
884 static void zcache_end_swap_write(struct bio *bio, int err)
885 {
886 end_swap_bio_write(bio, err);
887 dec_zcache_outstanding_writeback_pages();
888 zcache_writtenback_pages++;
889 }
890
891 /*
892 * zcache_get_swap_cache_page
893 *
894 * This is an adaptation of read_swap_cache_async()
895 *
896 * On success, the page is returned in new_page
897 * Returns 0 if the page was already in the swap cache; page is not locked
898 * Returns 1 if the new page needs to be populated; page is locked
899 */
900 static int zcache_get_swap_cache_page(int type, pgoff_t offset,
901 struct page *new_page)
902 {
903 struct page *found_page;
904 swp_entry_t entry = swp_entry(type, offset);
905 int err;
906
907 BUG_ON(new_page == NULL);
908 do {
909 /*
910 * First check the swap cache. Since this is normally
911 * called after lookup_swap_cache() failed, re-calling
912 * that would confuse statistics.
913 */
914 found_page = find_get_page(&swapper_space, entry.val);
915 if (found_page)
916 return 0;
917
918 /*
919 * call radix_tree_preload() while we can wait.
920 */
921 err = radix_tree_preload(GFP_KERNEL);
922 if (err)
923 break;
924
925 /*
926 * Swap entry may have been freed since our caller observed it.
927 */
928 err = swapcache_prepare(entry);
929 if (err == -EEXIST) { /* seems racy */
930 radix_tree_preload_end();
931 continue;
932 }
933 if (err) { /* swp entry is obsolete ? */
934 radix_tree_preload_end();
935 break;
936 }
937
938 /* May fail (-ENOMEM) if radix-tree node allocation failed. */
939 __set_page_locked(new_page);
940 SetPageSwapBacked(new_page);
941 err = __add_to_swap_cache(new_page, entry);
942 if (likely(!err)) {
943 radix_tree_preload_end();
944 lru_cache_add_anon(new_page);
945 return 1;
946 }
947 radix_tree_preload_end();
948 ClearPageSwapBacked(new_page);
949 __clear_page_locked(new_page);
950 /*
951 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
952 * clear SWAP_HAS_CACHE flag.
953 */
954 swapcache_free(entry, NULL);
955 /* FIXME: is it possible to get here without err==-ENOMEM?
956 * If not, we can dispense with the do loop, use goto retry */
957 } while (err != -ENOMEM);
958
959 return -ENOMEM;
960 }
961
962 /*
963 * Given a frontswap zpage in zcache (identified by type/offset) and
964 * an empty page, put the page into the swap cache, use frontswap
965 * to get the page from zcache into the empty page, then give it
966 * to the swap subsystem to send to disk (carefully avoiding the
967 * possibility that frontswap might snatch it back).
968 * Returns < 0 if error, 0 if successful, and 1 if successful but
969 * the newpage passed in not needed and should be freed.
970 */
971 static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
972 struct page *newpage)
973 {
974 struct page *page = newpage;
975 int ret;
976 struct writeback_control wbc = {
977 .sync_mode = WB_SYNC_NONE,
978 };
979
980 ret = zcache_get_swap_cache_page(type, offset, page);
981 if (ret < 0)
982 return ret;
983 else if (ret == 0) {
984 /* more uptodate page is already in swapcache */
985 __frontswap_invalidate_page(type, offset);
986 return 1;
987 }
988
989 BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
990 /* FIXME: how is it possible to get here when page is unlocked? */
991 __frontswap_load(page);
992 SetPageUptodate(page); /* above does SetPageDirty, is that enough? */
993
994 /* start writeback */
995 SetPageReclaim(page);
996 /*
997 * Return value is ignored here because it doesn't change anything
998 * for us. Page is returned unlocked.
999 */
1000 (void)__swap_writepage(page, &wbc, zcache_end_swap_write);
1001 page_cache_release(page);
1002 inc_zcache_outstanding_writeback_pages();
1003
1004 return 0;
1005 }
1006
1007 /*
1008 * The following is still a magic number... we want to allow forward progress
1009 * for writeback because it clears out needed RAM when under pressure, but
1010 * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
1011 * pages if the swap device is very slow.
1012 */
1013 #define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
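/*
 * For scale (assuming 4K pages): 6400 outstanding pages bound the
 * in-flight writeback data at roughly 25MB before
 * zcache_frontswap_writeback() below stops queueing new
 * GFP_KERNEL-backed writebacks.
 */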
1014
1015 /*
1016 * Try to allocate two free pages, first using a non-aggressive alloc,
1017 * then by evicting zcache ephemeral (clean pagecache) pages, and last
1018 * by aggressive GFP_KERNEL alloc. We allow zbud to choose a pageframe
1019 * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
1020 * function above for each.
1021 */
1022 static int zcache_frontswap_writeback(void)
1023 {
1024 struct tmem_handle th[2];
1025 int ret = 0;
1026 int nzbuds, writeback_ret;
1027 unsigned type;
1028 struct page *znewpage1 = NULL, *znewpage2 = NULL;
1029 struct page *evictpage1 = NULL, *evictpage2 = NULL;
1030 struct page *newpage1 = NULL, *newpage2 = NULL;
1031 struct page *page1 = NULL, *page2 = NULL;
1032 pgoff_t offset;
1033
1034 znewpage1 = alloc_page(ZCACHE_GFP_MASK);
1035 znewpage2 = alloc_page(ZCACHE_GFP_MASK);
1036 if (znewpage1 == NULL)
1037 evictpage1 = zcache_evict_eph_pageframe();
1038 if (znewpage2 == NULL)
1039 evictpage2 = zcache_evict_eph_pageframe();
1040
1041 if ((evictpage1 == NULL || evictpage2 == NULL) &&
1042 atomic_read(&zcache_outstanding_writeback_pages_atomic) >
1043 ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
1044 goto free_and_out;
1045 }
1046 if (znewpage1 == NULL && evictpage1 == NULL)
1047 newpage1 = alloc_page(GFP_KERNEL);
1048 if (znewpage2 == NULL && evictpage2 == NULL)
1049 newpage2 = alloc_page(GFP_KERNEL);
1050 if (newpage1 == NULL || newpage2 == NULL)
1051 goto free_and_out;
1052
1053 /* ok, we have two pageframes pre-allocated, get a pair of zbuds */
1054 nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
1055 if (nzbuds == 0) {
1056 ret = -ENOENT;
1057 goto free_and_out;
1058 }
1059
1060 /* process the first zbud */
1061 unswiz(th[0].oid, th[0].index, &type, &offset);
1062 page1 = (znewpage1 != NULL) ? znewpage1 :
1063 ((newpage1 != NULL) ? newpage1 : evictpage1);
1064 writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
1065 if (writeback_ret < 0) {
1066 ret = -ENOMEM;
1067 goto free_and_out;
1068 }
1069 if (evictpage1 != NULL)
1070 zcache_pageframes_freed =
1071 atomic_inc_return(&zcache_pageframes_freed_atomic);
1072 if (writeback_ret == 0) {
1073 /* zcache_get_swap_cache_page will free, don't double free */
1074 znewpage1 = NULL;
1075 newpage1 = NULL;
1076 evictpage1 = NULL;
1077 }
1078 if (nzbuds < 2)
1079 goto free_and_out;
1080
1081 /* if there is a second zbud, process it */
1082 unswiz(th[1].oid, th[1].index, &type, &offset);
1083 page2 = (znewpage2 != NULL) ? znewpage2 :
1084 ((newpage2 != NULL) ? newpage2 : evictpage2);
1085 writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
1086 if (writeback_ret < 0) {
1087 ret = -ENOMEM;
1088 goto free_and_out;
1089 }
1090 if (evictpage2 != NULL)
1091 zcache_pageframes_freed =
1092 atomic_inc_return(&zcache_pageframes_freed_atomic);
1093 if (writeback_ret == 0) {
1094 znewpage2 = NULL;
1095 newpage2 = NULL;
1096 evictpage2 = NULL;
1097 }
1098
1099 free_and_out:
1100 if (znewpage1 != NULL)
1101 page_cache_release(znewpage1);
1102 if (znewpage2 != NULL)
1103 page_cache_release(znewpage2);
1104 if (newpage1 != NULL)
1105 page_cache_release(newpage1);
1106 if (newpage2 != NULL)
1107 page_cache_release(newpage2);
1108 if (evictpage1 != NULL)
1109 zcache_free_page(evictpage1);
1110 if (evictpage2 != NULL)
1111 zcache_free_page(evictpage2);
1112 return ret;
1113 }
1114 #endif /* CONFIG_ZCACHE_WRITEBACK */
1115
1116 /*
1117 * When zcache is disabled ("frozen"), pools can be created and destroyed,
1118 * but all puts (and thus all other operations that require memory allocation)
1119 * must fail. If zcache is unfrozen, accepts puts, and is then frozen again,
1120 * data consistency requires that all puts attempted while frozen be
1121 * converted into flushes.
1122 */
1123 static bool zcache_freeze;
1124
1125 /*
1126 * This zcache shrinker interface reduces the number of ephemeral pageframes
1127 * used by zcache to approximately the same as the total number of LRU_FILE
1128 * pageframes in use, and now also reduces the number of persistent pageframes
1129 * used by zcache to approximately the same as the total number of LRU_ANON
1130 * pageframes in use. FIXME POLICY: Probably the writeback should only occur
1131 * if the eviction doesn't free enough pages.
1132 */
1133 static int shrink_zcache_memory(struct shrinker *shrink,
1134 struct shrink_control *sc)
1135 {
1136 static bool in_progress;
1137 int ret = -1;
1138 int nr = sc->nr_to_scan;
1139 int nr_evict = 0;
1140 int nr_writeback = 0;
1141 struct page *page;
1142 int file_pageframes_inuse, anon_pageframes_inuse;
1143
1144 if (nr <= 0)
1145 goto skip_evict;
1146
1147 /* don't allow more than one eviction thread at a time */
1148 if (in_progress)
1149 goto skip_evict;
1150
1151 in_progress = true;
1152
1153 /* we are going to ignore nr, and target a different value */
1154 zcache_last_active_file_pageframes =
1155 global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
1156 zcache_last_inactive_file_pageframes =
1157 global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
1158 file_pageframes_inuse = zcache_last_active_file_pageframes +
1159 zcache_last_inactive_file_pageframes;
1160 if (zcache_eph_pageframes > file_pageframes_inuse)
1161 nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
1162 else
1163 nr_evict = 0;
1164 while (nr_evict-- > 0) {
1165 page = zcache_evict_eph_pageframe();
1166 if (page == NULL)
1167 break;
1168 zcache_free_page(page);
1169 }
1170
1171 zcache_last_active_anon_pageframes =
1172 global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
1173 zcache_last_inactive_anon_pageframes =
1174 global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
1175 anon_pageframes_inuse = zcache_last_active_anon_pageframes +
1176 zcache_last_inactive_anon_pageframes;
1177 if (zcache_pers_pageframes > anon_pageframes_inuse)
1178 nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
1179 else
1180 nr_writeback = 0;
1181 while (nr_writeback-- > 0) {
1182 #ifdef CONFIG_ZCACHE_WRITEBACK
1183 int writeback_ret;
1184 writeback_ret = zcache_frontswap_writeback();
1185 if (writeback_ret == -ENOMEM)
1186 #endif
1187 break;
1188 }
1189 in_progress = false;
1190
1191 skip_evict:
1192 /* resample: has changed, but maybe not all the way yet */
1193 zcache_last_active_file_pageframes =
1194 global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
1195 zcache_last_inactive_file_pageframes =
1196 global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
1197 ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
1198 zcache_last_inactive_file_pageframes;
1199 if (ret < 0)
1200 ret = 0;
1201 return ret;
1202 }
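/*
 * Worked example of the targets above (numbers illustrative): if zcache
 * currently holds 10000 ephemeral pageframes while the file LRUs hold a
 * total of 8000 pages, nr_evict is 2000; likewise, 5000 persistent
 * pageframes against 4500 anon LRU pages yields nr_writeback of 500.
 */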
1203
1204 static struct shrinker zcache_shrinker = {
1205 .shrink = shrink_zcache_memory,
1206 .seeks = DEFAULT_SEEKS,
1207 };
1208
1209 /*
1210 * zcache shims between cleancache/frontswap ops and tmem
1211 */
1212
1213 /* FIXME rename these core routines to zcache_tmemput etc? */
1214 int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1215 uint32_t index, void *page,
1216 unsigned int size, bool raw, int ephemeral)
1217 {
1218 struct tmem_pool *pool;
1219 struct tmem_handle th;
1220 int ret = -1;
1221 void *pampd = NULL;
1222
1223 BUG_ON(!irqs_disabled());
1224 pool = zcache_get_pool_by_id(cli_id, pool_id);
1225 if (unlikely(pool == NULL))
1226 goto out;
1227 if (!zcache_freeze) {
1228 ret = 0;
1229 th.client_id = cli_id;
1230 th.pool_id = pool_id;
1231 th.oid = *oidp;
1232 th.index = index;
1233 pampd = zcache_pampd_create((char *)page, size, raw,
1234 ephemeral, &th);
1235 if (pampd == NULL) {
1236 ret = -ENOMEM;
1237 if (ephemeral)
1238 inc_zcache_failed_eph_puts();
1239 else
1240 inc_zcache_failed_pers_puts();
1241 } else {
1242 if (ramster_enabled)
1243 ramster_do_preload_flnode(pool);
1244 ret = tmem_put(pool, oidp, index, 0, pampd);
1245 if (ret < 0)
1246 BUG();
1247 }
1248 zcache_put_pool(pool);
1249 } else {
1250 inc_zcache_put_to_flush();
1251 if (ramster_enabled)
1252 ramster_do_preload_flnode(pool);
1253 if (atomic_read(&pool->obj_count) > 0)
1254 /* the put fails whether the flush succeeds or not */
1255 (void)tmem_flush_page(pool, oidp, index);
1256 zcache_put_pool(pool);
1257 }
1258 out:
1259 return ret;
1260 }
1261
1262 int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1263 uint32_t index, void *page,
1264 size_t *sizep, bool raw, int get_and_free)
1265 {
1266 struct tmem_pool *pool;
1267 int ret = -1;
1268 bool eph;
1269
1270 if (!raw) {
1271 BUG_ON(irqs_disabled());
1272 BUG_ON(in_softirq());
1273 }
1274 pool = zcache_get_pool_by_id(cli_id, pool_id);
1275 eph = is_ephemeral(pool);
1276 if (likely(pool != NULL)) {
1277 if (atomic_read(&pool->obj_count) > 0)
1278 ret = tmem_get(pool, oidp, index, (char *)(page),
1279 sizep, raw, get_and_free);
1280 zcache_put_pool(pool);
1281 }
1282 WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
1283 "zcache_get fails on persistent pool, "
1284 "bad things are very likely to happen soon\n");
1285 #ifdef RAMSTER_TESTING
1286 if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
1287 pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
1288 #endif
1289 return ret;
1290 }
1291
1292 int zcache_flush_page(int cli_id, int pool_id,
1293 struct tmem_oid *oidp, uint32_t index)
1294 {
1295 struct tmem_pool *pool;
1296 int ret = -1;
1297 unsigned long flags;
1298
1299 local_irq_save(flags);
1300 inc_zcache_flush_total();
1301 pool = zcache_get_pool_by_id(cli_id, pool_id);
1302 if (ramster_enabled)
1303 ramster_do_preload_flnode(pool);
1304 if (likely(pool != NULL)) {
1305 if (atomic_read(&pool->obj_count) > 0)
1306 ret = tmem_flush_page(pool, oidp, index);
1307 zcache_put_pool(pool);
1308 }
1309 if (ret >= 0)
1310 inc_zcache_flush_found();
1311 local_irq_restore(flags);
1312 return ret;
1313 }
1314
1315 int zcache_flush_object(int cli_id, int pool_id,
1316 struct tmem_oid *oidp)
1317 {
1318 struct tmem_pool *pool;
1319 int ret = -1;
1320 unsigned long flags;
1321
1322 local_irq_save(flags);
1323 inc_zcache_flobj_total();
1324 pool = zcache_get_pool_by_id(cli_id, pool_id);
1325 if (ramster_enabled)
1326 ramster_do_preload_flnode(pool);
1327 if (likely(pool != NULL)) {
1328 if (atomic_read(&pool->obj_count) > 0)
1329 ret = tmem_flush_object(pool, oidp);
1330 zcache_put_pool(pool);
1331 }
1332 if (ret >= 0)
1333 inc_zcache_flobj_found();
1334 local_irq_restore(flags);
1335 return ret;
1336 }
1337
1338 static int zcache_client_destroy_pool(int cli_id, int pool_id)
1339 {
1340 struct tmem_pool *pool = NULL;
1341 struct zcache_client *cli = NULL;
1342 int ret = -1;
1343
1344 if (pool_id < 0)
1345 goto out;
1346 if (cli_id == LOCAL_CLIENT)
1347 cli = &zcache_host;
1348 else if ((unsigned int)cli_id < MAX_CLIENTS)
1349 cli = &zcache_clients[cli_id];
1350 if (cli == NULL)
1351 goto out;
1352 atomic_inc(&cli->refcount);
1353 pool = cli->tmem_pools[pool_id];
1354 if (pool == NULL)
1355 goto out;
1356 cli->tmem_pools[pool_id] = NULL;
1357 /* wait for pool activity on other cpus to quiesce */
1358 while (atomic_read(&pool->refcount) != 0)
1359 ;
1360 atomic_dec(&cli->refcount);
1361 local_bh_disable();
1362 ret = tmem_destroy_pool(pool);
1363 local_bh_enable();
1364 kfree(pool);
1365 if (cli_id == LOCAL_CLIENT)
1366 pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
1367 else
1368 pr_info("%s: destroyed pool id=%d, client=%d\n",
1369 namestr, pool_id, cli_id);
1370 out:
1371 return ret;
1372 }
1373
1374 int zcache_new_pool(uint16_t cli_id, uint32_t flags)
1375 {
1376 int poolid = -1;
1377 struct tmem_pool *pool;
1378 struct zcache_client *cli = NULL;
1379
1380 if (cli_id == LOCAL_CLIENT)
1381 cli = &zcache_host;
1382 else if ((unsigned int)cli_id < MAX_CLIENTS)
1383 cli = &zcache_clients[cli_id];
1384 if (cli == NULL)
1385 goto out;
1386 atomic_inc(&cli->refcount);
1387 pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
1388 if (pool == NULL)
1389 goto out;
1390
1391 for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
1392 if (cli->tmem_pools[poolid] == NULL)
1393 break;
1394 if (poolid >= MAX_POOLS_PER_CLIENT) {
1395 pr_info("%s: pool creation failed: max exceeded\n", namestr);
1396 kfree(pool);
1397 poolid = -1;
1398 goto out;
1399 }
1400 atomic_set(&pool->refcount, 0);
1401 pool->client = cli;
1402 pool->pool_id = poolid;
1403 tmem_new_pool(pool, flags);
1404 cli->tmem_pools[poolid] = pool;
1405 if (cli_id == LOCAL_CLIENT)
1406 pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
1407 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1408 poolid);
1409 else
1410 pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
1411 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1412 poolid, cli_id);
1413 out:
1414 if (cli != NULL)
1415 atomic_dec(&cli->refcount);
1416 return poolid;
1417 }
1418
1419 static int zcache_local_new_pool(uint32_t flags)
1420 {
1421 return zcache_new_pool(LOCAL_CLIENT, flags);
1422 }
1423
1424 int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
1425 {
1426 struct tmem_pool *pool;
1427 struct zcache_client *cli = NULL;
1428 uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
1429 int ret = -1;
1430
1431 BUG_ON(!ramster_enabled);
1432 if (cli_id == LOCAL_CLIENT)
1433 goto out;
1434 if (pool_id >= MAX_POOLS_PER_CLIENT)
1435 goto out;
1436 if (cli_id >= MAX_CLIENTS)
1437 goto out;
1438
1439 cli = &zcache_clients[cli_id];
1440 if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
1441 pr_err("zcache_autocreate_pool: pool type disabled\n");
1442 goto out;
1443 }
1444 if (!cli->allocated) {
1445 if (zcache_new_client(cli_id)) {
1446 pr_err("zcache_autocreate_pool: can't create client\n");
1447 goto out;
1448 }
1449 cli = &zcache_clients[cli_id];
1450 }
1451 atomic_inc(&cli->refcount);
1452 pool = cli->tmem_pools[pool_id];
1453 if (pool != NULL) {
1454 if (pool->persistent && eph) {
1455 pr_err("zcache_autocreate_pool: type mismatch\n");
1456 goto out;
1457 }
1458 ret = 0;
1459 goto out;
1460 }
1461 pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
1462 if (pool == NULL)
1463 goto out;
1464
1465 atomic_set(&pool->refcount, 0);
1466 pool->client = cli;
1467 pool->pool_id = pool_id;
1468 tmem_new_pool(pool, flags);
1469 cli->tmem_pools[pool_id] = pool;
1470 pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
1471 namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1472 pool_id, cli_id);
1473 ret = 0;
1474 out:
1475 if (cli != NULL)
1476 atomic_dec(&cli->refcount);
1477 return ret;
1478 }
1479
1480 /**********
1481 * Two kernel functionalities currently can be layered on top of tmem.
1482 * These are "cleancache" which is used as a second-chance cache for clean
1483 * page cache pages; and "frontswap" which is used for swap pages
1484 * to avoid writes to disk. A generic "shim" is provided here for each
1485 * to translate in-kernel semantics to zcache semantics.
1486 */
1487
1488 static void zcache_cleancache_put_page(int pool_id,
1489 struct cleancache_filekey key,
1490 pgoff_t index, struct page *page)
1491 {
1492 u32 ind = (u32) index;
1493 struct tmem_oid oid = *(struct tmem_oid *)&key;
1494
1495 if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
1496 inc_zcache_eph_nonactive_puts_ignored();
1497 return;
1498 }
1499 if (likely(ind == index))
1500 (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
1501 page, PAGE_SIZE, false, 1);
1502 }
1503
1504 static int zcache_cleancache_get_page(int pool_id,
1505 struct cleancache_filekey key,
1506 pgoff_t index, struct page *page)
1507 {
1508 u32 ind = (u32) index;
1509 struct tmem_oid oid = *(struct tmem_oid *)&key;
1510 size_t size;
1511 int ret = -1;
1512
1513 if (likely(ind == index)) {
1514 ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
1515 page, &size, false, 0);
1516 BUG_ON(ret >= 0 && size != PAGE_SIZE);
1517 if (ret == 0)
1518 SetPageWasActive(page);
1519 }
1520 return ret;
1521 }
1522
1523 static void zcache_cleancache_flush_page(int pool_id,
1524 struct cleancache_filekey key,
1525 pgoff_t index)
1526 {
1527 u32 ind = (u32) index;
1528 struct tmem_oid oid = *(struct tmem_oid *)&key;
1529
1530 if (likely(ind == index))
1531 (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
1532 }
1533
1534 static void zcache_cleancache_flush_inode(int pool_id,
1535 struct cleancache_filekey key)
1536 {
1537 struct tmem_oid oid = *(struct tmem_oid *)&key;
1538
1539 (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
1540 }
1541
1542 static void zcache_cleancache_flush_fs(int pool_id)
1543 {
1544 if (pool_id >= 0)
1545 (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
1546 }
1547
1548 static int zcache_cleancache_init_fs(size_t pagesize)
1549 {
1550 BUG_ON(sizeof(struct cleancache_filekey) !=
1551 sizeof(struct tmem_oid));
1552 BUG_ON(pagesize != PAGE_SIZE);
1553 return zcache_local_new_pool(0);
1554 }
1555
1556 static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
1557 {
1558 /* shared pools are unsupported and map to private */
1559 BUG_ON(sizeof(struct cleancache_filekey) !=
1560 sizeof(struct tmem_oid));
1561 BUG_ON(pagesize != PAGE_SIZE);
1562 return zcache_local_new_pool(0);
1563 }
1564
1565 static struct cleancache_ops zcache_cleancache_ops = {
1566 .put_page = zcache_cleancache_put_page,
1567 .get_page = zcache_cleancache_get_page,
1568 .invalidate_page = zcache_cleancache_flush_page,
1569 .invalidate_inode = zcache_cleancache_flush_inode,
1570 .invalidate_fs = zcache_cleancache_flush_fs,
1571 .init_shared_fs = zcache_cleancache_init_shared_fs,
1572 .init_fs = zcache_cleancache_init_fs
1573 };
1574
1575 struct cleancache_ops zcache_cleancache_register_ops(void)
1576 {
1577 struct cleancache_ops old_ops =
1578 cleancache_register_ops(&zcache_cleancache_ops);
1579
1580 return old_ops;
1581 }
1582
1583 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1584 static int zcache_frontswap_poolid __read_mostly = -1;
1585
1586 /*
1587 * Swizzling increases objects per swaptype, increasing tmem concurrency
1588 * for heavy swaploads. Later, a larger nr_cpus may warrant a larger
1589 * SWIZ_BITS. Setting SWIZ_BITS to 27 basically reconstructs the swap
1590 * entry from frontswap_get_page(), but has side-effects. Hence we use 8.
1591 */
1592 #define SWIZ_BITS 8
1593 #define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
1594 #define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
1595 #define iswiz(_ind) (_ind >> SWIZ_BITS)
1596
1597 static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1598 {
1599 struct tmem_oid oid = { .oid = { 0 } };
1600 oid.oid[0] = _oswiz(type, ind);
1601 return oid;
1602 }
1603
1604 #ifdef CONFIG_ZCACHE_WRITEBACK
1605 static void unswiz(struct tmem_oid oid, u32 index,
1606 unsigned *type, pgoff_t *offset)
1607 {
1608 *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
1609 *offset = (pgoff_t)((index << SWIZ_BITS) |
1610 (oid.oid[0] & SWIZ_MASK));
1611 }
1612 #endif
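/*
 * Swizzle example (illustrative values): with SWIZ_BITS == 8, storing
 * type 1, offset 0x12345 gives oid.oid[0] == _oswiz(1, 0x12345) == 0x145
 * and a tmem index of iswiz(0x12345) == 0x123; unswiz() then recovers
 * type == 0x145 >> 8 == 1 and offset == (0x123 << 8) | 0x45 == 0x12345.
 */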
1613
1614 static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
1615 struct page *page)
1616 {
1617 u64 ind64 = (u64)offset;
1618 u32 ind = (u32)offset;
1619 struct tmem_oid oid = oswiz(type, ind);
1620 int ret = -1;
1621 unsigned long flags;
1622
1623 BUG_ON(!PageLocked(page));
1624 if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
1625 inc_zcache_pers_nonactive_puts_ignored();
1626 ret = -ERANGE;
1627 goto out;
1628 }
1629 if (likely(ind64 == ind)) {
1630 local_irq_save(flags);
1631 ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1632 &oid, iswiz(ind),
1633 page, PAGE_SIZE, false, 0);
1634 local_irq_restore(flags);
1635 }
1636 out:
1637 return ret;
1638 }
1639
1640 /* returns 0 if the page was successfully fetched from frontswap, -1 if
1641 * it was not present (should never happen!) */
1642 static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
1643 struct page *page)
1644 {
1645 u64 ind64 = (u64)offset;
1646 u32 ind = (u32)offset;
1647 struct tmem_oid oid = oswiz(type, ind);
1648 size_t size;
1649 int ret = -1, get_and_free;
1650
1651 if (frontswap_has_exclusive_gets)
1652 get_and_free = 1;
1653 else
1654 get_and_free = -1;
1655 BUG_ON(!PageLocked(page));
1656 if (likely(ind64 == ind)) {
1657 ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1658 &oid, iswiz(ind),
1659 page, &size, false, get_and_free);
1660 BUG_ON(ret >= 0 && size != PAGE_SIZE);
1661 }
1662 return ret;
1663 }
1664
1665 /* flush a single page from frontswap */
1666 static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
1667 {
1668 u64 ind64 = (u64)offset;
1669 u32 ind = (u32)offset;
1670 struct tmem_oid oid = oswiz(type, ind);
1671
1672 if (likely(ind64 == ind))
1673 (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1674 &oid, iswiz(ind));
1675 }
1676
1677 /* flush all pages from the passed swaptype */
1678 static void zcache_frontswap_flush_area(unsigned type)
1679 {
1680 struct tmem_oid oid;
1681 int ind;
1682
1683 for (ind = SWIZ_MASK; ind >= 0; ind--) {
1684 oid = oswiz(type, ind);
1685 (void)zcache_flush_object(LOCAL_CLIENT,
1686 zcache_frontswap_poolid, &oid);
1687 }
1688 }
1689
1690 static void zcache_frontswap_init(unsigned ignored)
1691 {
1692 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1693 if (zcache_frontswap_poolid < 0)
1694 zcache_frontswap_poolid =
1695 zcache_local_new_pool(TMEM_POOL_PERSIST);
1696 }
1697
1698 static struct frontswap_ops zcache_frontswap_ops = {
1699 .store = zcache_frontswap_put_page,
1700 .load = zcache_frontswap_get_page,
1701 .invalidate_page = zcache_frontswap_flush_page,
1702 .invalidate_area = zcache_frontswap_flush_area,
1703 .init = zcache_frontswap_init
1704 };
1705
1706 struct frontswap_ops zcache_frontswap_register_ops(void)
1707 {
1708 struct frontswap_ops old_ops =
1709 frontswap_register_ops(&zcache_frontswap_ops);
1710
1711 return old_ops;
1712 }
1713
1714 /*
1715 * zcache initialization
1716 * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
1717 * OR NOTHING HAPPENS!
1718 */
1719
1720 static int __init enable_zcache(char *s)
1721 {
1722 zcache_enabled = true;
1723 return 1;
1724 }
1725 __setup("zcache", enable_zcache);
1726
1727 static int __init enable_ramster(char *s)
1728 {
1729 zcache_enabled = true;
1730 #ifdef CONFIG_RAMSTER
1731 ramster_enabled = true;
1732 #endif
1733 return 1;
1734 }
1735 __setup("ramster", enable_ramster);
1736
1737 /* allow independent dynamic disabling of cleancache and frontswap */
1738
1739 static int __init no_cleancache(char *s)
1740 {
1741 disable_cleancache = true;
1742 return 1;
1743 }
1744
1745 __setup("nocleancache", no_cleancache);
1746
1747 static int __init no_frontswap(char *s)
1748 {
1749 disable_frontswap = true;
1750 return 1;
1751 }
1752
1753 __setup("nofrontswap", no_frontswap);
1754
1755 static int __init no_frontswap_exclusive_gets(char *s)
1756 {
1757 frontswap_has_exclusive_gets = false;
1758 return 1;
1759 }
1760
1761 __setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
1762
1763 static int __init no_frontswap_ignore_nonactive(char *s)
1764 {
1765 disable_frontswap_ignore_nonactive = true;
1766 return 1;
1767 }
1768
1769 __setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
1770
1771 static int __init no_cleancache_ignore_nonactive(char *s)
1772 {
1773 disable_cleancache_ignore_nonactive = true;
1774 return 1;
1775 }
1776
1777 __setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
1778
1779 static int __init enable_zcache_compressor(char *s)
1780 {
1781 strlcpy(zcache_comp_name, s, sizeof(zcache_comp_name));
1782 zcache_enabled = true;
1783 return 1;
1784 }
1785 __setup("zcache=", enable_zcache_compressor);
1786
1787
1788 static int __init zcache_comp_init(void)
1789 {
1790 int ret = 0;
1791
1792 /* check crypto algorithm */
1793 if (*zcache_comp_name != '\0') {
1794 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1795 if (!ret)
1796 pr_info("zcache: %s not supported\n",
1797 zcache_comp_name);
1798 }
1799 if (!ret)
1800 strcpy(zcache_comp_name, "lzo");
1801 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1802 if (!ret) {
1803 ret = 1;
1804 goto out;
1805 }
1806 pr_info("zcache: using %s compressor\n", zcache_comp_name);
1807
1808 /* alloc percpu transforms */
1809 ret = 0;
1810 zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
1811 if (!zcache_comp_pcpu_tfms)
1812 ret = 1;
1813 out:
1814 return ret;
1815 }
1816
1817 static int __init zcache_init(void)
1818 {
1819 int ret = 0;
1820
1821 if (ramster_enabled) {
1822 namestr = "ramster";
1823 ramster_register_pamops(&zcache_pamops);
1824 }
1825 zcache_debugfs_init();
1826 if (zcache_enabled) {
1827 unsigned int cpu;
1828
1829 tmem_register_hostops(&zcache_hostops);
1830 tmem_register_pamops(&zcache_pamops);
1831 ret = register_cpu_notifier(&zcache_cpu_notifier_block);
1832 if (ret) {
1833 pr_err("%s: can't register cpu notifier\n", namestr);
1834 goto out;
1835 }
1836 ret = zcache_comp_init();
1837 if (ret) {
1838 pr_err("%s: compressor initialization failed\n",
1839 namestr);
1840 goto out;
1841 }
1842 for_each_online_cpu(cpu) {
1843 void *pcpu = (void *)(long)cpu;
1844 zcache_cpu_notifier(&zcache_cpu_notifier_block,
1845 CPU_UP_PREPARE, pcpu);
1846 }
1847 }
1848 zcache_objnode_cache = kmem_cache_create("zcache_objnode",
1849 sizeof(struct tmem_objnode), 0, 0, NULL);
1850 zcache_obj_cache = kmem_cache_create("zcache_obj",
1851 sizeof(struct tmem_obj), 0, 0, NULL);
1852 ret = zcache_new_client(LOCAL_CLIENT);
1853 if (ret) {
1854 pr_err("%s: can't create client\n", namestr);
1855 goto out;
1856 }
1857 zbud_init();
1858 if (zcache_enabled && !disable_cleancache) {
1859 struct cleancache_ops old_ops;
1860
1861 register_shrinker(&zcache_shrinker);
1862 old_ops = zcache_cleancache_register_ops();
1863 pr_info("%s: cleancache enabled using kernel transcendent "
1864 "memory and compression buddies\n", namestr);
1865 #ifdef CONFIG_ZCACHE_DEBUG
1866 pr_info("%s: cleancache: ignorenonactive = %d\n",
1867 namestr, !disable_cleancache_ignore_nonactive);
1868 #endif
1869 if (old_ops.init_fs != NULL)
1870 pr_warn("%s: cleancache_ops overridden\n", namestr);
1871 }
1872 if (zcache_enabled && !disable_frontswap) {
1873 struct frontswap_ops old_ops;
1874
1875 old_ops = zcache_frontswap_register_ops();
1876 if (frontswap_has_exclusive_gets)
1877 frontswap_tmem_exclusive_gets(true);
1878 pr_info("%s: frontswap enabled using kernel transcendent "
1879 "memory and compression buddies\n", namestr);
1880 #ifdef CONFIG_ZCACHE_DEBUG
1881 pr_info("%s: frontswap: excl gets = %d active only = %d\n",
1882 namestr, frontswap_has_exclusive_gets,
1883 !disable_frontswap_ignore_nonactive);
1884 #endif
1885 if (old_ops.init != NULL)
1886 pr_warn("%s: frontswap_ops overridden\n", namestr);
1887 }
1888 if (ramster_enabled)
1889 ramster_init(!disable_cleancache, !disable_frontswap,
1890 frontswap_has_exclusive_gets);
1891 out:
1892 return ret;
1893 }
1894
1895 late_initcall(zcache_init);