/*
 * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
 * Copyright (c) 2010,2011, Nitin Gupta
 *
 * Zcache provides an in-kernel "host implementation" for transcendent memory
 * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
 * lzo1x compression to improve density and an embedded allocator called
 * "zbud" which "buddies" two compressed pages semi-optimally in each physical
 * pageframe. Zbud is integrally tied into tmem to allow pageframes to
 * be "reclaimed" efficiently.
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/math64.h>
#include <linux/crypto.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

#include <linux/cleancache.h>
#include <linux/frontswap.h>
#include "tmem.h"
#include "zcache.h"
#include "zbud.h"
#include "ramster.h"
#include "debug.h"
#ifdef CONFIG_RAMSTER
static bool ramster_enabled __read_mostly;
#else
#define ramster_enabled false
#endif
#ifndef __PG_WAS_ACTIVE
static inline bool PageWasActive(struct page *page)
{
	return true;
}

static inline void SetPageWasActive(struct page *page)
{
}
#endif
#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
static bool frontswap_has_exclusive_gets __read_mostly = true;
#else
static bool frontswap_has_exclusive_gets __read_mostly;
static inline void frontswap_tmem_exclusive_gets(bool b)
{
}
#endif
/*
 * Mark the pampd with a special value so that a later retrieval can
 * identify zero-filled pages.
 */
#define ZERO_FILLED 0x2
/* enable (or fix code) when Seth's patches are accepted upstream */
#define zcache_writeback_enabled 0
static bool zcache_enabled __read_mostly;
static bool disable_cleancache __read_mostly;
static bool disable_frontswap __read_mostly;
static bool disable_frontswap_ignore_nonactive __read_mostly;
static bool disable_cleancache_ignore_nonactive __read_mostly;
static char *namestr __read_mostly = "zcache";
#define ZCACHE_GFP_MASK \
	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
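/*
 * Note on the mask above: zcache allocations are strictly opportunistic.
 * __GFP_NORETRY keeps the allocator from looping hard, __GFP_NOMEMALLOC
 * keeps it out of the emergency reserves, and __GFP_NOWARN suppresses the
 * allocation-failure splat, since a failed allocation here merely means a
 * page goes uncached or uncompressed.
 */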
/* crypto API for zcache */
#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
enum comp_op {
	ZCACHE_COMPOP_COMPRESS,
	ZCACHE_COMPOP_DECOMPRESS
};
static inline int zcache_comp_op(enum comp_op op,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret = -1;

	BUG_ON(!zcache_comp_pcpu_tfms);
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		break;
	}
	put_cpu();
	return ret;
}
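/*
 * Illustrative sketch (not part of zcache proper): a compress/decompress
 * round trip through zcache_comp_op(). The names "src", "workmem" and
 * "out" are hypothetical; the real callers are zcache_compress() and
 * zcache_decompress() below, which also handle the kmap and per-cpu
 * buffer details.
 */
#if 0
static int zcache_comp_roundtrip_sketch(const u8 *src, u8 *workmem, u8 *out)
{
	unsigned int clen = 2 * PAGE_SIZE;	/* room for incompressible data */
	unsigned int dlen = PAGE_SIZE;
	int ret;

	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, src, PAGE_SIZE,
				workmem, &clen);
	if (ret)
		return ret;
	/* clen now holds the compressed size; decompress back out */
	return zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, workmem, clen,
				out, &dlen);
}
#endif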
/*
 * byte count defining poor compression; pages with greater zsize will be
 * rejected
 */
static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
/*
 * byte count defining poor *mean* compression; pages with greater zsize
 * will be rejected until sufficient better-compressed pages are accepted
 * driving the mean below this threshold
 */
static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
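/*
 * Worked example, assuming 4KB pageframes: zbud_max_zsize is 3584 bytes,
 * so a page whose compressed size exceeds 7/8 of a page is rejected
 * outright; zbud_max_mean_zsize is 2560 bytes, so while the mean
 * compressed size exceeds 5/8 of a page, further poorly-compressing pages
 * are rejected until enough well-compressed pages pull the mean back down.
 */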
/*
 * for now, use named slabs so we can easily track usage; later we can
 * either just use kmalloc, or perhaps add a slab-like allocator
 * to more carefully manage total memory utilization
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;
static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
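/*
 * The per-cpu preloads exist because the tmem host callbacks (obj and
 * objnode alloc) run with interrupts disabled and so must not sleep or
 * allocate. zcache_pampd_create() below fills kp->obj and kp->objnodes[]
 * from the slabs up front; zcache_obj_alloc()/zcache_objnode_alloc() then
 * merely take from the preload stash.
 */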
/* Used by debug.c */
ssize_t zcache_pers_zpages;
u64 zcache_pers_zbytes;
ssize_t zcache_eph_pageframes;
ssize_t zcache_pers_pageframes;

/* Used by this code. */
ssize_t zcache_last_active_file_pageframes;
ssize_t zcache_last_inactive_file_pageframes;
ssize_t zcache_last_active_anon_pageframes;
ssize_t zcache_last_inactive_anon_pageframes;
#ifdef CONFIG_ZCACHE_WRITEBACK
ssize_t zcache_writtenback_pages;
ssize_t zcache_outstanding_writeback_pages;
#endif
/*
 * zcache core code starts here
 */
static struct zcache_client zcache_host;
static struct zcache_client zcache_clients[MAX_CLIENTS];
static inline bool is_local_client(struct zcache_client *cli)
{
	return cli == &zcache_host;
}
static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
{
	struct zcache_client *cli = &zcache_host;

	if (cli_id != LOCAL_CLIENT) {
		if (cli_id >= MAX_CLIENTS)
			goto out;
		cli = &zcache_clients[cli_id];
	}
out:
	return cli;
}
/*
 * Tmem operations assume the poolid implies the invoking client.
 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
 * RAMster has each client numbered by cluster node, and a KVM version
 * of zcache would have one client per guest and each client might
 * have multiple pools.
 */
struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;

	cli = zcache_get_client_by_id(cli_id);
	if (cli == NULL)
		goto out;
	if (!is_local_client(cli))
		atomic_inc(&cli->refcount);
	if (poolid < MAX_POOLS_PER_CLIENT) {
		pool = cli->tmem_pools[poolid];
		if (pool != NULL)
			atomic_inc(&pool->refcount);
	}
out:
	return pool;
}
void zcache_put_pool(struct tmem_pool *pool)
{
	struct zcache_client *cli = NULL;

	if (pool == NULL)
		BUG();
	cli = pool->client;
	atomic_dec(&pool->refcount);
	if (!is_local_client(cli))
		atomic_dec(&cli->refcount);
}
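/*
 * Sketch of the refcounting discipline used by the shims below: every
 * successful zcache_get_pool_by_id() is paired with a zcache_put_pool(),
 * e.g.:
 *
 *	pool = zcache_get_pool_by_id(cli_id, pool_id);
 *	if (likely(pool != NULL)) {
 *		... tmem_put/tmem_get/tmem_flush_* on pool ...
 *		zcache_put_pool(pool);
 *	}
 *
 * zcache_client_destroy_pool() spins until pool->refcount drains to zero.
 */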
int zcache_new_client(uint16_t cli_id)
{
	struct zcache_client *cli;
	int ret = -1;

	cli = zcache_get_client_by_id(cli_id);
	if (cli == NULL)
		goto out;
	if (cli->allocated)
		goto out;
	cli->allocated = 1;
	ret = 0;
out:
	return ret;
}
/*
 * zcache implementation for tmem host ops
 */
static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
	struct tmem_objnode *objnode = NULL;
	struct zcache_preload *kp;
	int i;

	kp = &__get_cpu_var(zcache_preloads);
	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		objnode = kp->objnodes[i];
		if (objnode != NULL) {
			kp->objnodes[i] = NULL;
			break;
		}
	}
	BUG_ON(objnode == NULL);
	inc_zcache_objnode_count();
	return objnode;
}
static void zcache_objnode_free(struct tmem_objnode *objnode,
				struct tmem_pool *pool)
{
	dec_zcache_objnode_count();
	kmem_cache_free(zcache_objnode_cache, objnode);
}
static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
	struct tmem_obj *obj = NULL;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	obj = kp->obj;
	BUG_ON(obj == NULL);
	kp->obj = NULL;
	inc_zcache_obj_count();
	return obj;
}
static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
	dec_zcache_obj_count();
	kmem_cache_free(zcache_obj_cache, obj);
}
/*
 * Compressing zero-filled pages would waste memory and introduce
 * serious fragmentation, so skip them to avoid the overhead.
 */
static bool page_is_zero_filled(struct page *p)
{
	unsigned int pos;
	char *page;

	page = kmap_atomic(p);
	for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos]) {
			kunmap_atomic(page);
			return false;
		}
	}
	kunmap_atomic(page);

	return true;
}
static void handle_zero_filled_page(void *p)
{
	void *user_mem;
	struct page *page = (struct page *)p;

	user_mem = kmap_atomic(page);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
static struct page *zcache_alloc_page(void)
{
	struct page *page = alloc_page(ZCACHE_GFP_MASK);

	if (page != NULL)
		inc_zcache_pageframes_alloced();
	return page;
}
static void zcache_free_page(struct page *page)
{
	long curr_pageframes;
	static long max_pageframes, min_pageframes;

	if (page == NULL)
		BUG();
	__free_page(page);

	inc_zcache_pageframes_freed();
	curr_pageframes = curr_pageframes_count();
	if (curr_pageframes > max_pageframes)
		max_pageframes = curr_pageframes;
	if (curr_pageframes < min_pageframes)
		min_pageframes = curr_pageframes;
#ifdef CONFIG_ZCACHE_DEBUG
	if (curr_pageframes > 2L || curr_pageframes < -2L) {
		/* ... */
	}
#endif
}
/*
 * zcache implementations for PAM page descriptor ops
 */

/* forward reference */
static void zcache_compress(struct page *from,
				void **out_va, unsigned *out_len);

static struct page *zcache_evict_eph_pageframe(void);
static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
					struct tmem_handle *th)
{
	void *pampd = NULL, *cdata = data;
	unsigned clen = size;
	bool zero_filled = false;
	struct page *page = (struct page *)(data), *newpage;

	if (page_is_zero_filled(page)) {
		clen = 0;
		zero_filled = true;
		inc_zcache_zero_filled_pages();
		goto got_pampd;
	}

	if (!raw) {
		zcache_compress(page, &cdata, &clen);
		if (clen > zbud_max_buddy_size()) {
			inc_zcache_compress_poor();
			goto out;
		}
	} else {
		BUG_ON(clen > zbud_max_buddy_size());
	}

	/* look for space via an existing match first */
	pampd = (void *)zbud_match_prep(th, true, cdata, clen);
	if (pampd != NULL)
		goto got_pampd;

	/* no match, now we need to find (or free up) a full page */
	newpage = zcache_alloc_page();
	if (newpage != NULL)
		goto create_in_new_page;

	inc_zcache_failed_getfreepages();
	/* can't allocate a page, evict an ephemeral page via LRU */
	newpage = zcache_evict_eph_pageframe();
	if (newpage == NULL) {
		inc_zcache_eph_ate_tail_failed();
		goto out;
	}
	inc_zcache_eph_ate_tail();

create_in_new_page:
	pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
	BUG_ON(pampd == NULL);
	inc_zcache_eph_pageframes();

got_pampd:
	inc_zcache_eph_zbytes(clen);
	inc_zcache_eph_zpages();
	if (ramster_enabled && raw)
		ramster_count_foreign_pages(true, 1);
	if (zero_filled)
		pampd = (void *)ZERO_FILLED;
out:
	return pampd;
}
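/*
 * To summarize the allocation ladder above: an ephemeral put first tries
 * to buddy into an existing half-full zbud pageframe (zbud_match_prep),
 * then to allocate a fresh pageframe with the non-aggressive
 * ZCACHE_GFP_MASK, and finally to evict another ephemeral pageframe via
 * the zbud LRU and reuse it ("eph ate tail").
 */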
static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
					struct tmem_handle *th)
{
	void *pampd = NULL, *cdata = data;
	unsigned clen = size;
	bool zero_filled = false;
	struct page *page = (struct page *)(data), *newpage;
	unsigned long zbud_mean_zsize;
	unsigned long curr_pers_zpages, total_zsize;

	if (data == NULL) {
		BUG_ON(!ramster_enabled);
		goto create_pampd;
	}

	if (page_is_zero_filled(page)) {
		clen = 0;
		zero_filled = true;
		inc_zcache_zero_filled_pages();
		goto got_pampd;
	}

	curr_pers_zpages = zcache_pers_zpages;
	/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
	if (!raw)
		zcache_compress(page, &cdata, &clen);
	/* reject if compression is too poor */
	if (clen > zbud_max_zsize) {
		inc_zcache_compress_poor();
		goto out;
	}
	/* reject if mean compression is too poor */
	if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
		total_zsize = zcache_pers_zbytes;
		if ((long)total_zsize < 0)
			total_zsize = 0;
		zbud_mean_zsize = div_u64(total_zsize,
					curr_pers_zpages);
		if (zbud_mean_zsize > zbud_max_mean_zsize) {
			inc_zcache_mean_compress_poor();
			goto out;
		}
	}

create_pampd:
	/* look for space via an existing match first */
	pampd = (void *)zbud_match_prep(th, false, cdata, clen);
	if (pampd != NULL)
		goto got_pampd;

	/* no match, now we need to find (or free up) a full page */
	newpage = zcache_alloc_page();
	if (newpage != NULL)
		goto create_in_new_page;
	/*
	 * FIXME do the following only if eph is oversized?
	 * if (zcache_eph_pageframes >
	 * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
	 * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
	 */
	inc_zcache_failed_getfreepages();
	/* can't allocate a page, evict an ephemeral page via LRU */
	newpage = zcache_evict_eph_pageframe();
	if (newpage == NULL) {
		inc_zcache_pers_ate_eph_failed();
		goto out;
	}
	inc_zcache_pers_ate_eph();

create_in_new_page:
	pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
	BUG_ON(pampd == NULL);
	inc_zcache_pers_pageframes();

got_pampd:
	inc_zcache_pers_zpages();
	inc_zcache_pers_zbytes(clen);
	if (ramster_enabled && raw)
		ramster_count_foreign_pages(false, 1);
	if (zero_filled)
		pampd = (void *)ZERO_FILLED;
out:
	return pampd;
}
/*
 * This is called directly from zcache_put_page to pre-allocate space
 * for the data to be stored.
 */
void *zcache_pampd_create(char *data, unsigned int size, bool raw,
			int eph, struct tmem_handle *th)
{
	void *pampd = NULL;
	struct zcache_preload *kp;
	struct tmem_objnode *objnode;
	struct tmem_obj *obj;
	int i;

	BUG_ON(!irqs_disabled());
	/* pre-allocate per-cpu metadata */
	BUG_ON(zcache_objnode_cache == NULL);
	BUG_ON(zcache_obj_cache == NULL);
	kp = &__get_cpu_var(zcache_preloads);
	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		objnode = kp->objnodes[i];
		if (objnode == NULL) {
			objnode = kmem_cache_alloc(zcache_objnode_cache,
							ZCACHE_GFP_MASK);
			if (unlikely(objnode == NULL)) {
				inc_zcache_failed_alloc();
				goto out;
			}
			kp->objnodes[i] = objnode;
		}
	}
	if (kp->obj == NULL) {
		obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
		kp->obj = obj;
	}
	if (unlikely(kp->obj == NULL)) {
		inc_zcache_failed_alloc();
		goto out;
	}
	/*
	 * ok, have all the metadata pre-allocated, now do the data
	 * but since how we allocate the data is dependent on ephemeral
	 * or persistent, we split the call here to different sub-functions
	 */
	if (eph)
		pampd = zcache_pampd_eph_create(data, size, raw, th);
	else
		pampd = zcache_pampd_pers_create(data, size, raw, th);
out:
	return pampd;
}
/*
 * This is a pamops called via tmem_put and is necessary to "finish"
 * the pampd creation.
 */
void zcache_pampd_create_finish(void *pampd, bool eph)
{
	if (pampd != (void *)ZERO_FILLED)
		zbud_create_finish((struct zbudref *)pampd, eph);
}
/*
 * This is passed as a function parameter to zbud_decompress so that
 * zbud need not be familiar with the details of crypto. It assumes that
 * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
 * kmapped. It must be successful, else there is a logic bug somewhere.
 */
static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
{
	int ret;
	unsigned int outlen = PAGE_SIZE;

	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
				to_va, &outlen);
	BUG_ON(ret);
	BUG_ON(outlen != PAGE_SIZE);
}
/*
 * Decompress from the kernel va to a pageframe
 */
void zcache_decompress_to_page(char *from_va, unsigned int size,
				struct page *to_page)
{
	char *to_va = kmap_atomic(to_page);
	zcache_decompress(from_va, size, to_va);
	kunmap_atomic(to_va);
}
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
				void *pampd, struct tmem_pool *pool,
				struct tmem_oid *oid, uint32_t index)
{
	int ret;
	bool eph = !is_persistent(pool);

	BUG_ON(preemptible());
	BUG_ON(eph);	/* fix later if shared pools get implemented */
	BUG_ON(pampd_is_remote(pampd));

	if (pampd == (void *)ZERO_FILLED) {
		handle_zero_filled_page(data);
		if (!raw)
			*sizep = PAGE_SIZE;
		return 0;
	}

	if (raw)
		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
						sizep, eph);
	else {
		ret = zbud_decompress((struct page *)(data),
					(struct zbudref *)pampd, false,
					zcache_decompress);
		*sizep = PAGE_SIZE;
	}
	return ret;
}
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret = 0;
	bool eph = !is_persistent(pool), zero_filled = false;
	struct page *page = NULL;
	unsigned int zsize, zpages;

	BUG_ON(preemptible());
	BUG_ON(pampd_is_remote(pampd));

	if (pampd == (void *)ZERO_FILLED) {
		handle_zero_filled_page(data);
		zero_filled = true;
		zsize = 0;
		zpages = 1;
		if (!raw)
			*sizep = PAGE_SIZE;
		dec_zcache_zero_filled_pages();
		goto zero_fill;
	}

	if (raw)
		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
						sizep, eph);
	else {
		ret = zbud_decompress((struct page *)(data),
					(struct zbudref *)pampd, eph,
					zcache_decompress);
		*sizep = PAGE_SIZE;
	}
	page = zbud_free_and_delist((struct zbudref *)pampd, eph,
					&zsize, &zpages);
zero_fill:
	if (eph) {
		if (page)
			dec_zcache_eph_pageframes();
		dec_zcache_eph_zpages(zpages);
		dec_zcache_eph_zbytes(zsize);
	} else {
		if (page)
			dec_zcache_pers_pageframes();
		dec_zcache_pers_zpages(zpages);
		dec_zcache_pers_zbytes(zsize);
	}
	if (!is_local_client(pool->client))
		ramster_count_foreign_pages(eph, -1);
	if (page && !zero_filled)
		zcache_free_page(page);
	return ret;
}
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
				struct tmem_oid *oid, uint32_t index, bool acct)
{
	struct page *page = NULL;
	unsigned int zsize, zpages;
	bool zero_filled = false;

	BUG_ON(preemptible());

	if (pampd == (void *)ZERO_FILLED) {
		zero_filled = true;
		zsize = 0;
		zpages = 1;
		dec_zcache_zero_filled_pages();
	}

	if (pampd_is_remote(pampd) && !zero_filled) {
		BUG_ON(!ramster_enabled);
		pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
		if (pampd == NULL)
			return;
	}
	if (is_ephemeral(pool)) {
		if (!zero_filled)
			page = zbud_free_and_delist((struct zbudref *)pampd,
							true, &zsize, &zpages);
		if (page)
			dec_zcache_eph_pageframes();
		dec_zcache_eph_zpages(zpages);
		dec_zcache_eph_zbytes(zsize);
		/* FIXME CONFIG_RAMSTER... check acct parameter? */
	} else {
		if (!zero_filled)
			page = zbud_free_and_delist((struct zbudref *)pampd,
							false, &zsize, &zpages);
		if (page)
			dec_zcache_pers_pageframes();
		dec_zcache_pers_zpages(zpages);
		dec_zcache_pers_zbytes(zsize);
	}
	if (!is_local_client(pool->client))
		ramster_count_foreign_pages(is_ephemeral(pool), -1);
	if (page && !zero_filled)
		zcache_free_page(page);
}
static struct tmem_pamops zcache_pamops = {
	.create_finish = zcache_pampd_create_finish,
	.get_data = zcache_pampd_get_data,
	.get_data_and_free = zcache_pampd_get_data_and_free,
	.free = zcache_pampd_free,
};
/*
 * zcache compression/decompression and related per-cpu stuff
 */

static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
#define ZCACHE_DSTMEM_ORDER 1
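/*
 * The per-cpu destination buffer is two pages (order 1) because a
 * compressor may expand incompressible input; zcache_compress() below
 * passes PAGE_SIZE << ZCACHE_DSTMEM_ORDER as the available output length
 * and lets the poor-compression checks reject oversized results.
 */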
static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
	int ret;
	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
	char *from_va;

	BUG_ON(!irqs_disabled());
	/* no buffer or no compressor so can't compress */
	BUG_ON(dmem == NULL);
	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
	from_va = kmap_atomic(from);
	mb();
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
				out_len);
	BUG_ON(ret);
	*out_va = dmem;
	kunmap_atomic(from_va);
}
static int zcache_comp_cpu_up(int cpu)
{
	struct crypto_comp *tfm;

	tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
	if (IS_ERR(tfm))
		return NOTIFY_BAD;
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
	return NOTIFY_OK;
}
static void zcache_comp_cpu_down(int cpu)
{
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
	crypto_free_comp(tfm);
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
}
static int zcache_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	int ret, i, cpu = (long)pcpu;
	struct zcache_preload *kp;

	switch (action) {
	case CPU_UP_PREPARE:
		ret = zcache_comp_cpu_up(cpu);
		if (ret != NOTIFY_OK) {
			pr_err("%s: can't allocate compressor xform\n",
				namestr);
			return ret;
		}
		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
		if (ramster_enabled)
			ramster_cpu_up(cpu);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		zcache_comp_cpu_down(cpu);
		free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
			ZCACHE_DSTMEM_ORDER);
		per_cpu(zcache_dstmem, cpu) = NULL;
		kp = &per_cpu(zcache_preloads, cpu);
		for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
			if (kp->objnodes[i])
				kmem_cache_free(zcache_objnode_cache,
						kp->objnodes[i]);
			kp->objnodes[i] = NULL;
		}
		if (kp->obj) {
			kmem_cache_free(zcache_obj_cache, kp->obj);
			kp->obj = NULL;
		}
		if (ramster_enabled)
			ramster_cpu_down(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block zcache_cpu_notifier_block = {
	.notifier_call = zcache_cpu_notifier
};
/*
 * The following code interacts with the zbud eviction and zbud
 * zombify code to access LRU pages
 */

static struct page *zcache_evict_eph_pageframe(void)
{
	struct page *page;
	unsigned int zsize = 0, zpages = 0;

	page = zbud_evict_pageframe_lru(&zsize, &zpages);
	if (page == NULL)
		goto out;
	dec_zcache_eph_zbytes(zsize);
	dec_zcache_eph_zpages(zpages);
	inc_zcache_evicted_eph_zpages(zpages);
	dec_zcache_eph_pageframes();
	inc_zcache_evicted_eph_pageframes();
out:
	return page;
}
#ifdef CONFIG_ZCACHE_WRITEBACK

static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);

static inline void inc_zcache_outstanding_writeback_pages(void)
{
	zcache_outstanding_writeback_pages =
		atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
}

static inline void dec_zcache_outstanding_writeback_pages(void)
{
	zcache_outstanding_writeback_pages =
		atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
}
static void unswiz(struct tmem_oid oid, u32 index,
			unsigned *type, pgoff_t *offset);
/*
 * Choose an LRU persistent pageframe and attempt to write it back to
 * the backing swap disk by calling frontswap_writeback on both zpages.
 *
 * This is work-in-progress.
 */

static void zcache_end_swap_write(struct bio *bio, int err)
{
	end_swap_bio_write(bio, err);
	dec_zcache_outstanding_writeback_pages();
	zcache_writtenback_pages++;
}
/*
 * zcache_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * If success, page is returned in retpage
 * Returns 0 if page was already in the swap cache, page is not locked
 * Returns 1 if the new page needs to be populated, page is locked
 */
static int zcache_get_swap_cache_page(int type, pgoff_t offset,
				struct page *new_page)
{
	struct page *found_page;
	swp_entry_t entry = swp_entry(type, offset);
	int err;

	BUG_ON(new_page == NULL);
	do {
		/*
		 * First check the swap cache. Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			return 0;

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			return 1;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		/* FIXME: is it possible to get here without err==-ENOMEM?
		 * If not, we can dispense with the do loop, use goto retry */
	} while (err != -ENOMEM);

	return 0;
}
/*
 * Given a frontswap zpage in zcache (identified by type/offset) and
 * an empty page, put the page into the swap cache, use frontswap
 * to get the page from zcache into the empty page, then give it
 * to the swap subsystem to send to disk (carefully avoiding the
 * possibility that frontswap might snatch it back).
 * Returns < 0 if error, 0 if successful, and 1 if successful but
 * the newpage passed in not needed and should be freed.
 */
static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
					struct page *newpage)
{
	struct page *page = newpage;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	ret = zcache_get_swap_cache_page(type, offset, page);
	if (ret < 0)
		return ret;
	else if (ret == 0) {
		/* more uptodate page is already in swapcache */
		__frontswap_invalidate_page(type, offset);
		return 1;
	}

	BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
	/* FIXME: how is it possible to get here when page is unlocked? */
	__frontswap_load(page);
	SetPageUptodate(page);  /* above does SetPageDirty, is that enough? */

	/* start writeback */
	SetPageReclaim(page);
	/*
	 * Return value is ignored here because it doesn't change anything
	 * for us. Page is returned unlocked.
	 */
	(void)__swap_writepage(page, &wbc, zcache_end_swap_write);
	page_cache_release(page);
	inc_zcache_outstanding_writeback_pages();

	return 0;
}
/*
 * The following is still a magic number... we want to allow forward progress
 * for writeback because it clears out needed RAM when under pressure, but
 * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
 * pages if the swap device is very slow.
 */
#define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
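/*
 * For example, with 4KB pages, 6400 outstanding writeback pages cap the
 * in-flight writeback memory at about 25MB.
 */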
/*
 * Try to allocate two free pages, first using a non-aggressive alloc,
 * then by evicting zcache ephemeral (clean pagecache) pages, and last
 * by aggressive GFP_KERNEL alloc. We allow zbud to choose a pageframe
 * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
 * function above for each.
 */
static int zcache_frontswap_writeback(void)
{
	struct tmem_handle th[2];
	int ret = 0;
	int nzbuds, writeback_ret;
	unsigned type;
	struct page *znewpage1 = NULL, *znewpage2 = NULL;
	struct page *evictpage1 = NULL, *evictpage2 = NULL;
	struct page *newpage1 = NULL, *newpage2 = NULL;
	struct page *page1 = NULL, *page2 = NULL;
	pgoff_t offset;

	znewpage1 = alloc_page(ZCACHE_GFP_MASK);
	znewpage2 = alloc_page(ZCACHE_GFP_MASK);
	if (znewpage1 == NULL)
		evictpage1 = zcache_evict_eph_pageframe();
	if (znewpage2 == NULL)
		evictpage2 = zcache_evict_eph_pageframe();

	if ((evictpage1 == NULL || evictpage2 == NULL) &&
	    atomic_read(&zcache_outstanding_writeback_pages_atomic) >
		ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
		ret = -ENOMEM;
		goto out;
	}
	if (znewpage1 == NULL && evictpage1 == NULL)
		newpage1 = alloc_page(GFP_KERNEL);
	if (znewpage2 == NULL && evictpage2 == NULL)
		newpage2 = alloc_page(GFP_KERNEL);
	if (newpage1 == NULL || newpage2 == NULL)
		goto out;

	/* ok, we have two pageframes pre-allocated, get a pair of zbuds */
	nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
	if (nzbuds == 0) {
		ret = -ENOENT;
		goto out;
	}

	/* process the first zbud */
	unswiz(th[0].oid, th[0].index, &type, &offset);
	page1 = (znewpage1 != NULL) ? znewpage1 :
		((newpage1 != NULL) ? newpage1 : evictpage1);
	writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
	if (writeback_ret < 0) {
		ret = -ENOMEM;
		goto out;
	}
	if (evictpage1 != NULL)
		zcache_pageframes_freed =
			atomic_inc_return(&zcache_pageframes_freed_atomic);
	if (writeback_ret == 0) {
		/* zcache_get_swap_cache_page will free, don't double free */
		znewpage1 = NULL;
		newpage1 = NULL;
		evictpage1 = NULL;
	}
	if (nzbuds < 2)
		goto out;

	/* if there is a second zbud, process it */
	unswiz(th[1].oid, th[1].index, &type, &offset);
	page2 = (znewpage2 != NULL) ? znewpage2 :
		((newpage2 != NULL) ? newpage2 : evictpage2);
	writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
	if (writeback_ret < 0) {
		ret = -ENOMEM;
		goto out;
	}
	if (evictpage2 != NULL)
		zcache_pageframes_freed =
			atomic_inc_return(&zcache_pageframes_freed_atomic);
	if (writeback_ret == 0) {
		/* zcache_get_swap_cache_page will free, don't double free */
		znewpage2 = NULL;
		newpage2 = NULL;
		evictpage2 = NULL;
	}

out:
	if (znewpage1 != NULL)
		page_cache_release(znewpage1);
	if (znewpage2 != NULL)
		page_cache_release(znewpage2);
	if (newpage1 != NULL)
		page_cache_release(newpage1);
	if (newpage2 != NULL)
		page_cache_release(newpage2);
	if (evictpage1 != NULL)
		zcache_free_page(evictpage1);
	if (evictpage2 != NULL)
		zcache_free_page(evictpage2);
	return ret;
}
#endif /* CONFIG_ZCACHE_WRITEBACK */
/*
 * When zcache is disabled ("frozen"), pools can be created and destroyed,
 * but all puts (and thus all other operations that require memory allocation)
 * must fail. If zcache is unfrozen, accepts puts, then frozen again,
 * data consistency requires all puts while frozen to be converted into
 * flushes.
 */
static bool zcache_freeze;
/*
 * This zcache shrinker interface reduces the number of ephemeral pageframes
 * used by zcache to approximately the same as the total number of LRU_FILE
 * pageframes in use, and now also reduces the number of persistent pageframes
 * used by zcache to approximately the same as the total number of LRU_ANON
 * pageframes in use. FIXME POLICY: Probably the writeback should only occur
 * if the eviction doesn't free enough pages.
 */
static int shrink_zcache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	static bool in_progress;
	int ret = -1;
	int nr = sc->nr_to_scan;
	int nr_evict = 0;
	int nr_writeback = 0;
	struct page *page;
	int file_pageframes_inuse, anon_pageframes_inuse;

	if (nr <= 0)
		goto skip_evict;

	/* don't allow more than one eviction thread at a time */
	if (in_progress)
		goto skip_evict;

	in_progress = true;

	/* we are going to ignore nr, and target a different value */
	zcache_last_active_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	zcache_last_inactive_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	file_pageframes_inuse = zcache_last_active_file_pageframes +
				zcache_last_inactive_file_pageframes;
	if (zcache_eph_pageframes > file_pageframes_inuse)
		nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
	else
		nr_evict = 0;

	while (nr_evict-- > 0) {
		page = zcache_evict_eph_pageframe();
		if (page == NULL)
			break;
		zcache_free_page(page);
	}

	zcache_last_active_anon_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
	zcache_last_inactive_anon_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
	anon_pageframes_inuse = zcache_last_active_anon_pageframes +
				zcache_last_inactive_anon_pageframes;
	if (zcache_pers_pageframes > anon_pageframes_inuse)
		nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
	else
		nr_writeback = 0;

	while (nr_writeback-- > 0) {
#ifdef CONFIG_ZCACHE_WRITEBACK
		int writeback_ret;
		writeback_ret = zcache_frontswap_writeback();
		if (writeback_ret == -ENOMEM)
#endif
			break;
	}
	in_progress = false;

skip_evict:
	/* resample: has changed, but maybe not all the way yet */
	zcache_last_active_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	zcache_last_inactive_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
		zcache_last_inactive_file_pageframes;
	if (ret < 0)
		ret = 0;
	return ret;
}
static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
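/*
 * Worked example of the policy above: if zcache holds 10000 ephemeral
 * pageframes while the file LRUs hold only 8000, a scan evicts up to
 * 2000 ephemeral pageframes; persistent pageframes are similarly written
 * back (under CONFIG_ZCACHE_WRITEBACK) against the anon LRU total.
 */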
/*
 * zcache shims between cleancache/frontswap ops and tmem
 */

/* FIXME rename these core routines to zcache_tmemput etc? */
int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
			uint32_t index, void *page,
			unsigned int size, bool raw, int ephemeral)
{
	struct tmem_pool *pool;
	struct tmem_handle th;
	int ret = -1;
	void *pampd = NULL;

	BUG_ON(!irqs_disabled());
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (unlikely(pool == NULL))
		goto out;
	if (!zcache_freeze) {
		th.client_id = cli_id;
		th.pool_id = pool_id;
		th.oid = *oidp;
		th.index = index;
		pampd = zcache_pampd_create((char *)page, size, raw,
						ephemeral, &th);
		if (pampd == NULL) {
			ret = -ENOMEM;
			if (ephemeral)
				inc_zcache_failed_eph_puts();
			else
				inc_zcache_failed_pers_puts();
			zcache_put_pool(pool);
			goto out;
		}
		if (ramster_enabled)
			ramster_do_preload_flnode(pool);
		ret = tmem_put(pool, oidp, index, 0, pampd);
		zcache_put_pool(pool);
	} else {
		inc_zcache_put_to_flush();
		if (ramster_enabled)
			ramster_do_preload_flnode(pool);
		if (atomic_read(&pool->obj_count) > 0)
			/* the put fails whether the flush succeeds or not */
			(void)tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
out:
	return ret;
}
int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
			uint32_t index, void *page,
			size_t *sizep, bool raw, int get_and_free)
{
	struct tmem_pool *pool;
	int ret = -1;
	bool eph;

	BUG_ON(irqs_disabled());
	BUG_ON(in_softirq());

	pool = zcache_get_pool_by_id(cli_id, pool_id);
	eph = is_ephemeral(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_get(pool, oidp, index, (char *)(page),
					sizep, raw, get_and_free);
		zcache_put_pool(pool);
	}
	WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
		"zcache_get fails on persistent pool, "
		"bad things are very likely to happen soon\n");
#ifdef RAMSTER_TESTING
	if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
		pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
#endif
	return ret;
}
int zcache_flush_page(int cli_id, int pool_id,
			struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	inc_zcache_flush_total();
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (ramster_enabled)
		ramster_do_preload_flnode(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		inc_zcache_flush_found();
	local_irq_restore(flags);
	return ret;
}
int zcache_flush_object(int cli_id, int pool_id,
			struct tmem_oid *oidp)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	inc_zcache_flobj_total();
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (ramster_enabled)
		ramster_do_preload_flnode(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_object(pool, oidp);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		inc_zcache_flobj_found();
	local_irq_restore(flags);
	return ret;
}
static int zcache_client_destroy_pool(int cli_id, int pool_id)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;
	int ret = -1;

	if (pool_id < 0)
		goto out;
	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool == NULL)
		goto out;
	cli->tmem_pools[pool_id] = NULL;
	/* wait for pool activity on other cpus to quiesce */
	while (atomic_read(&pool->refcount) != 0)
		;
	atomic_dec(&cli->refcount);
	local_bh_disable();
	ret = tmem_destroy_pool(pool);
	local_bh_enable();
	kfree(pool);
	if (cli_id == LOCAL_CLIENT)
		pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
	else
		pr_info("%s: destroyed pool id=%d, client=%d\n",
			namestr, pool_id, cli_id);
out:
	return ret;
}
int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
	int poolid = -1;
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
	if (pool == NULL)
		goto out;

	for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
		if (cli->tmem_pools[poolid] == NULL)
			break;
	if (poolid >= MAX_POOLS_PER_CLIENT) {
		pr_info("%s: pool creation failed: max exceeded\n", namestr);
		kfree(pool);
		poolid = -1;
		goto out;
	}

	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = poolid;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[poolid] = pool;
	if (cli_id == LOCAL_CLIENT)
		pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
			flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
			poolid);
	else
		pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
			flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
			poolid, cli_id);
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return poolid;
}

static int zcache_local_new_pool(uint32_t flags)
{
	return zcache_new_pool(LOCAL_CLIENT, flags);
}
int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
{
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;
	uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
	int ret = -1;

	BUG_ON(!ramster_enabled);
	if (cli_id == LOCAL_CLIENT)
		goto out;
	if (pool_id >= MAX_POOLS_PER_CLIENT)
		goto out;
	if (cli_id >= MAX_CLIENTS)
		goto out;

	cli = &zcache_clients[cli_id];
	if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
		pr_err("zcache_autocreate_pool: pool type disabled\n");
		goto out;
	}
	if (!cli->allocated) {
		if (zcache_new_client(cli_id)) {
			pr_err("zcache_autocreate_pool: can't create client\n");
			goto out;
		}
		cli = &zcache_clients[cli_id];
	}
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool != NULL) {
		if (pool->persistent && eph) {
			pr_err("zcache_autocreate_pool: type mismatch\n");
			goto out;
		}
		ret = 0;
		goto out;
	}
	pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
	if (pool == NULL)
		goto out;

	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = pool_id;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[pool_id] = pool;
	pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
		namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
		pool_id, cli_id);
	ret = 0;
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return ret;
}
/*
 * Two kernel functionalities currently can be layered on top of tmem.
 * These are "cleancache" which is used as a second-chance cache for clean
 * page cache pages; and "frontswap" which is used for swap pages
 * to avoid writes to disk. A generic "shim" is provided here for each
 * to translate in-kernel semantics to zcache semantics.
 */
static void zcache_cleancache_put_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
		inc_zcache_eph_nonactive_puts_ignored();
		return;
	}
	if (likely(ind == index))
		(void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
					page, PAGE_SIZE, false, 1);
}
static int zcache_cleancache_get_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	size_t size;
	int ret = -1;

	if (likely(ind == index)) {
		ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
					page, &size, false, 0);
		BUG_ON(ret >= 0 && size != PAGE_SIZE);
		if (ret == 0)
			SetPageWasActive(page);
	}
	return ret;
}
static void zcache_cleancache_flush_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (likely(ind == index))
		(void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}
static void zcache_cleancache_flush_inode(int pool_id,
					struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	(void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
}
static void zcache_cleancache_flush_fs(int pool_id)
{
	if (pool_id >= 0)
		(void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
}
static int zcache_cleancache_init_fs(size_t pagesize)
{
	BUG_ON(sizeof(struct cleancache_filekey) !=
			sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_local_new_pool(0);
}

static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	/* shared pools are unsupported and map to private */
	BUG_ON(sizeof(struct cleancache_filekey) !=
			sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_local_new_pool(0);
}
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.invalidate_page = zcache_cleancache_flush_page,
	.invalidate_inode = zcache_cleancache_flush_inode,
	.invalidate_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};
zcache_cleancache_register_ops(void)
1581 struct cleancache_ops old_ops
=
1582 cleancache_register_ops(&zcache_cleancache_ops
);
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid __read_mostly = -1;
/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
 * frontswap_get_page(), but has side-effects. Hence using 8.
 */
#define SWIZ_BITS		8
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
#ifdef CONFIG_ZCACHE_WRITEBACK
static void unswiz(struct tmem_oid oid, u32 index,
			unsigned *type, pgoff_t *offset)
{
	*type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
	*offset = (pgoff_t)((index << SWIZ_BITS) |
			(oid.oid[0] & SWIZ_MASK));
}
#endif
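/*
 * Worked example with SWIZ_BITS == 8: for swap type 2 and page offset
 * 0x12345, oswiz() yields oid.oid[0] == (2 << 8) | 0x45 == 0x245 and
 * iswiz() yields tmem index 0x123; unswiz() recombines these to recover
 * (type 2, offset 0x12345). Consecutive swap offsets thus land in
 * different tmem objects (offsets congruent modulo 256 share one object),
 * spreading concurrent puts across objects.
 */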
static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	int ret = -1;
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
		inc_zcache_pers_nonactive_puts_ignored();
		ret = -ERANGE;
		goto out;
	}
	if (likely(ind64 == ind)) {
		local_irq_save(flags);
		ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind),
					page, PAGE_SIZE, false, 0);
		local_irq_restore(flags);
	}
out:
	return ret;
}
/* returns 0 if the page was successfully gotten from frontswap, -1 if
 * was not present (should never happen!) */
static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	size_t size;
	int ret = -1, get_and_free;

	if (frontswap_has_exclusive_gets)
		get_and_free = 1;
	else
		get_and_free = -1;
	BUG_ON(!PageLocked(page));
	if (likely(ind64 == ind)) {
		ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind),
					page, &size, false, get_and_free);
		BUG_ON(ret >= 0 && size != PAGE_SIZE);
	}
	return ret;
}
/* flush a single page from frontswap */
static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);

	if (likely(ind64 == ind))
		(void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void zcache_frontswap_flush_area(unsigned type)
{
	struct tmem_oid oid;
	int ind;

	for (ind = SWIZ_MASK; ind >= 0; ind--) {
		oid = oswiz(type, ind);
		(void)zcache_flush_object(LOCAL_CLIENT,
					zcache_frontswap_poolid, &oid);
	}
}
static void zcache_frontswap_init(unsigned ignored)
{
	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (zcache_frontswap_poolid < 0)
		zcache_frontswap_poolid =
			zcache_local_new_pool(TMEM_POOL_PERSIST);
}
static struct frontswap_ops zcache_frontswap_ops = {
	.store = zcache_frontswap_put_page,
	.load = zcache_frontswap_get_page,
	.invalidate_page = zcache_frontswap_flush_page,
	.invalidate_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};
zcache_frontswap_register_ops(void)
1712 struct frontswap_ops old_ops
=
1713 frontswap_register_ops(&zcache_frontswap_ops
);
/*
 * zcache initialization
 * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
 * OR NOTHING HAPPENS!
 */

static int __init enable_zcache(char *s)
{
	zcache_enabled = true;
	return 1;
}
__setup("zcache", enable_zcache);
static int __init enable_ramster(char *s)
{
	zcache_enabled = true;
#ifdef CONFIG_RAMSTER
	ramster_enabled = true;
#endif
	return 1;
}
__setup("ramster", enable_ramster);
/* allow independent dynamic disabling of cleancache and frontswap */

static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);

static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);

static int __init no_frontswap_exclusive_gets(char *s)
{
	frontswap_has_exclusive_gets = false;
	return 1;
}
__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);

static int __init no_frontswap_ignore_nonactive(char *s)
{
	disable_frontswap_ignore_nonactive = true;
	return 1;
}
__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);

static int __init no_cleancache_ignore_nonactive(char *s)
{
	disable_cleancache_ignore_nonactive = true;
	return 1;
}
__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
static int __init enable_zcache_compressor(char *s)
{
	strlcpy(zcache_comp_name, s, sizeof(zcache_comp_name));
	zcache_enabled = true;
	return 1;
}
__setup("zcache=", enable_zcache_compressor);
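/*
 * Example boot-parameter usage: "zcache" enables zcache with the default
 * lzo compressor chosen by zcache_comp_init() below; "zcache=deflate"
 * selects another crypto compressor by name; "ramster" enables the
 * RAMster variant; "nofrontswap"/"nocleancache" disable one shim.
 */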
static int __init zcache_comp_init(void)
{
	int ret = 0;

	/* check crypto algorithm */
	if (*zcache_comp_name != '\0') {
		ret = crypto_has_comp(zcache_comp_name, 0, 0);
		if (!ret)
			pr_info("zcache: %s not supported\n",
					zcache_comp_name);
	}
	if (!ret)
		strcpy(zcache_comp_name, "lzo");
	ret = crypto_has_comp(zcache_comp_name, 0, 0);
	if (!ret) {
		ret = 1;
		goto out;
	}
	pr_info("zcache: using %s compressor\n", zcache_comp_name);

	/* alloc percpu transforms */
	ret = 0;
	zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zcache_comp_pcpu_tfms)
		ret = 1;
out:
	return ret;
}
static int __init zcache_init(void)
{
	int ret = 0;

	if (ramster_enabled) {
		namestr = "ramster";
		ramster_register_pamops(&zcache_pamops);
	}
	zcache_debugfs_init();
	if (zcache_enabled) {
		unsigned int cpu;

		tmem_register_hostops(&zcache_hostops);
		tmem_register_pamops(&zcache_pamops);
		ret = register_cpu_notifier(&zcache_cpu_notifier_block);
		if (ret) {
			pr_err("%s: can't register cpu notifier\n", namestr);
			goto out;
		}
		ret = zcache_comp_init();
		if (ret) {
			pr_err("%s: compressor initialization failed\n",
				namestr);
			goto out;
		}
		for_each_online_cpu(cpu) {
			void *pcpu = (void *)(long)cpu;
			zcache_cpu_notifier(&zcache_cpu_notifier_block,
				CPU_UP_PREPARE, pcpu);
		}
	}
	zcache_objnode_cache = kmem_cache_create("zcache_objnode",
				sizeof(struct tmem_objnode), 0, 0, NULL);
	zcache_obj_cache = kmem_cache_create("zcache_obj",
				sizeof(struct tmem_obj), 0, 0, NULL);
	ret = zcache_new_client(LOCAL_CLIENT);
	if (ret) {
		pr_err("%s: can't create client\n", namestr);
		goto out;
	}
	if (zcache_enabled && !disable_cleancache) {
		struct cleancache_ops old_ops;

		register_shrinker(&zcache_shrinker);
		old_ops = zcache_cleancache_register_ops();
		pr_info("%s: cleancache enabled using kernel transcendent "
			"memory and compression buddies\n", namestr);
#ifdef CONFIG_ZCACHE_DEBUG
		pr_info("%s: cleancache: ignorenonactive = %d\n",
			namestr, !disable_cleancache_ignore_nonactive);
#endif
		if (old_ops.init_fs != NULL)
			pr_warn("%s: cleancache_ops overridden\n", namestr);
	}
	if (zcache_enabled && !disable_frontswap) {
		struct frontswap_ops old_ops;

		old_ops = zcache_frontswap_register_ops();
		if (frontswap_has_exclusive_gets)
			frontswap_tmem_exclusive_gets(true);
		pr_info("%s: frontswap enabled using kernel transcendent "
			"memory and compression buddies\n", namestr);
#ifdef CONFIG_ZCACHE_DEBUG
		pr_info("%s: frontswap: excl gets = %d active only = %d\n",
			namestr, frontswap_has_exclusive_gets,
			!disable_frontswap_ignore_nonactive);
#endif
		if (old_ops.init != NULL)
			pr_warn("%s: frontswap_ops overridden\n", namestr);
	}
	if (ramster_enabled)
		ramster_init(!disable_cleancache, !disable_frontswap,
				frontswap_has_exclusive_gets);
out:
	return ret;
}

late_initcall(zcache_init);