/*
 * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
 * Copyright (c) 2010,2011, Nitin Gupta
 *
 * Zcache provides an in-kernel "host implementation" for transcendent memory
 * ("tmem") and, thus indirectly, for cleancache and frontswap.  Zcache uses
 * lzo1x compression to improve density and an embedded allocator called
 * "zbud" which "buddies" two compressed pages semi-optimally in each physical
 * pageframe.  Zbud is integrally tied into tmem to allow pageframes to
 * be "reclaimed" efficiently.
 */
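/*
 * Illustrative note (added, not from the original source): with 4KB
 * pageframes, two pages that compress to, say, roughly 1700 and 2300 bytes
 * can be "buddied" by zbud into a single physical pageframe, roughly
 * doubling density compared to one compressed page per pageframe.
 */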
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/math64.h>
#include <linux/crypto.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

#include <linux/cleancache.h>
#include <linux/frontswap.h>
#ifdef CONFIG_RAMSTER
static bool ramster_enabled __read_mostly;
#else
#define ramster_enabled false
#endif
#ifndef __PG_WAS_ACTIVE
static inline bool PageWasActive(struct page *page)
{
	return true;
}

static inline void SetPageWasActive(struct page *page)
{
}
#endif
#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
static bool frontswap_has_exclusive_gets __read_mostly = true;
#else
static bool frontswap_has_exclusive_gets __read_mostly;
static inline void frontswap_tmem_exclusive_gets(bool b)
{
}
#endif
/*
 * Mark the pampd with a special value so that a later retrieval can
 * identify zero-filled pages.
 */
#define ZERO_FILLED 0x2

/* enable (or fix code) when Seth's patches are accepted upstream */
#define zcache_writeback_enabled 0
static bool zcache_enabled __read_mostly;
static bool disable_cleancache __read_mostly;
static bool disable_frontswap __read_mostly;
static bool disable_frontswap_ignore_nonactive __read_mostly;
static bool disable_cleancache_ignore_nonactive __read_mostly;
static char *namestr __read_mostly = "zcache";
#define ZCACHE_GFP_MASK \
	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
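/*
 * Descriptive note (added, not from the original author): this mask allows
 * filesystem activity but forbids retries, allocation-failure warnings and
 * use of emergency reserves, so a failed zcache allocation is cheap and
 * never deepens existing memory pressure.
 */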
/* crypto API for zcache */
#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
enum comp_op {
	ZCACHE_COMPOP_COMPRESS,
	ZCACHE_COMPOP_DECOMPRESS
};
static inline int zcache_comp_op(enum comp_op op,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret = -1;

	BUG_ON(!zcache_comp_pcpu_tfms);
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	}
	put_cpu();
	return ret;
}
/*
 * byte count defining poor compression; pages with greater zsize will be
 * rejected
 */
static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;

/*
 * byte count defining poor *mean* compression; pages with greater zsize
 * will be rejected until sufficient better-compressed pages are accepted
 * driving the mean below this threshold
 */
static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
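/*
 * Worked example (added for illustration, assuming PAGE_SIZE == 4096):
 * zbud_max_zsize is (4096/8)*7 == 3584 bytes, so any persistent zpage
 * that compresses to more than 3584 bytes is rejected outright;
 * zbud_max_mean_zsize is (4096/8)*5 == 2560 bytes, so a zpage larger than
 * 2560 bytes is also rejected whenever the running mean compressed size
 * of already-stored persistent zpages is itself above 2560 bytes.
 */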
/*
 * For now, use named slabs so usage can easily be tracked; later we can
 * either just use kmalloc, or perhaps add a slab-like allocator to more
 * carefully manage total memory utilization.
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;

static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
/* Used by debug.c */
ssize_t zcache_pers_zpages;
u64 zcache_pers_zbytes;
ssize_t zcache_eph_pageframes;
ssize_t zcache_pers_pageframes;

/* Used by this code. */
ssize_t zcache_last_active_file_pageframes;
ssize_t zcache_last_inactive_file_pageframes;
ssize_t zcache_last_active_anon_pageframes;
ssize_t zcache_last_inactive_anon_pageframes;
#ifdef CONFIG_ZCACHE_WRITEBACK
ssize_t zcache_writtenback_pages;
ssize_t zcache_outstanding_writeback_pages;
#endif
/*
 * zcache core code starts here
 */

static struct zcache_client zcache_host;
static struct zcache_client zcache_clients[MAX_CLIENTS];

static inline bool is_local_client(struct zcache_client *cli)
{
	return cli == &zcache_host;
}
static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
{
	struct zcache_client *cli = &zcache_host;

	if (cli_id != LOCAL_CLIENT) {
		if (cli_id >= MAX_CLIENTS)
			goto out;
		cli = &zcache_clients[cli_id];
	}
out:
	return cli;
}
/*
 * Tmem operations assume the poolid implies the invoking client.
 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
 * RAMster has each client numbered by cluster node, and a KVM version
 * of zcache would have one client per guest and each client might
 * have a poolid==N.
 */
struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;

	cli = zcache_get_client_by_id(cli_id);
	if (cli == NULL)
		goto out;
	if (!is_local_client(cli))
		atomic_inc(&cli->refcount);
	if (poolid < MAX_POOLS_PER_CLIENT) {
		pool = cli->tmem_pools[poolid];
		if (pool != NULL)
			atomic_inc(&pool->refcount);
	}
out:
	return pool;
}
void zcache_put_pool(struct tmem_pool *pool)
{
	struct zcache_client *cli = NULL;

	if (pool == NULL)
		BUG();
	cli = pool->client;
	atomic_dec(&pool->refcount);
	if (!is_local_client(cli))
		atomic_dec(&cli->refcount);
}
int zcache_new_client(uint16_t cli_id)
{
	struct zcache_client *cli;

	cli = zcache_get_client_by_id(cli_id);
	if (cli == NULL || cli->allocated)
		return -1;
	cli->allocated = 1;
	return 0;
}
/*
 * zcache implementation for tmem host ops
 */

static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
	struct tmem_objnode *objnode = NULL;
	struct zcache_preload *kp;
	int i;

	kp = &__get_cpu_var(zcache_preloads);
	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		objnode = kp->objnodes[i];
		if (objnode != NULL) {
			kp->objnodes[i] = NULL;
			break;
		}
	}
	BUG_ON(objnode == NULL);
	inc_zcache_objnode_count();
	return objnode;
}
static void zcache_objnode_free(struct tmem_objnode *objnode,
				struct tmem_pool *pool)
{
	dec_zcache_objnode_count();
	kmem_cache_free(zcache_objnode_cache, objnode);
}
static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
	struct tmem_obj *obj = NULL;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	obj = kp->obj;
	BUG_ON(obj == NULL);
	kp->obj = NULL;
	inc_zcache_obj_count();
	return obj;
}
static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
	dec_zcache_obj_count();
	kmem_cache_free(zcache_obj_cache, obj);
}
/*
 * Compressing zero-filled pages will waste memory and introduce
 * serious fragmentation, skip it to avoid overhead.
 */
static bool page_is_zero_filled(struct page *p)
{
	unsigned int pos;
	unsigned long *page;

	page = kmap_atomic(p);
	for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos]) {
			kunmap_atomic(page);
			return false;
		}
	}
	kunmap_atomic(page);
	return true;
}
static void handle_zero_filled_page(void *p)
{
	void *user_mem;
	struct page *page = (struct page *)p;

	user_mem = kmap_atomic(page);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
static struct page *zcache_alloc_page(void)
{
	struct page *page = alloc_page(ZCACHE_GFP_MASK);

	if (page != NULL)
		inc_zcache_pageframes_alloced();
	return page;
}
static void zcache_free_page(struct page *page)
{
	long curr_pageframes;
	static long max_pageframes, min_pageframes;

	if (page == NULL)
		BUG();
	__free_page(page);
	inc_zcache_pageframes_freed();
	curr_pageframes = curr_pageframes_count();
	if (curr_pageframes > max_pageframes)
		max_pageframes = curr_pageframes;
	if (curr_pageframes < min_pageframes)
		min_pageframes = curr_pageframes;
#ifdef CONFIG_ZCACHE_DEBUG
	if (curr_pageframes > 2L || curr_pageframes < -2L) {
	}
#endif
}
/*
 * zcache implementations for PAM page descriptor ops
 */

/* forward reference */
static void zcache_compress(struct page *from,
				void **out_va, unsigned *out_len);

static struct page *zcache_evict_eph_pageframe(void);
static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
					struct tmem_handle *th)
{
	void *pampd = NULL, *cdata = data;
	unsigned clen = size;
	bool zero_filled = false;
	struct page *page = (struct page *)(data), *newpage;

	if (page_is_zero_filled(page)) {
		clen = 0;
		zero_filled = true;
		goto got_pampd;
	}

	if (!raw) {
		zcache_compress(page, &cdata, &clen);
		if (clen > zbud_max_buddy_size()) {
			inc_zcache_compress_poor();
			goto out;
		}
	} else {
		BUG_ON(clen > zbud_max_buddy_size());
	}

	/* look for space via an existing match first */
	pampd = (void *)zbud_match_prep(th, true, cdata, clen);
	if (pampd != NULL)
		goto got_pampd;

	/* no match, now we need to find (or free up) a full page */
	newpage = zcache_alloc_page();
	if (newpage != NULL)
		goto create_in_new_page;

	inc_zcache_failed_getfreepages();
	/* can't allocate a page, evict an ephemeral page via LRU */
	newpage = zcache_evict_eph_pageframe();
	if (newpage == NULL) {
		inc_zcache_eph_ate_tail_failed();
		goto out;
	}
	inc_zcache_eph_ate_tail();

create_in_new_page:
	pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
	BUG_ON(pampd == NULL);
	inc_zcache_eph_pageframes();

got_pampd:
	inc_zcache_eph_zbytes(clen);
	inc_zcache_eph_zpages();
	if (ramster_enabled && raw)
		ramster_count_foreign_pages(true, 1);
	if (zero_filled)
		pampd = (void *)ZERO_FILLED;
out:
	return pampd;
}
static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
					struct tmem_handle *th)
{
	void *pampd = NULL, *cdata = data;
	unsigned clen = size;
	bool zero_filled = false;
	struct page *page = (struct page *)(data), *newpage;
	unsigned long zbud_mean_zsize;
	unsigned long curr_pers_zpages, total_zsize;

	if (data == NULL) {
		BUG_ON(!ramster_enabled);
		goto create_pampd;
	}

	if (page_is_zero_filled(page)) {
		clen = 0;
		zero_filled = true;
		goto got_pampd;
	}

	curr_pers_zpages = zcache_pers_zpages;
	/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
	zcache_compress(page, &cdata, &clen);
	/* reject if compression is too poor */
	if (clen > zbud_max_zsize) {
		inc_zcache_compress_poor();
		goto out;
	}
	/* reject if mean compression is too poor */
	if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
		total_zsize = zcache_pers_zbytes;
		if ((long)total_zsize < 0)
			total_zsize = 0;
		zbud_mean_zsize = div_u64(total_zsize,
					curr_pers_zpages);
		if (zbud_mean_zsize > zbud_max_mean_zsize) {
			inc_zcache_mean_compress_poor();
			goto out;
		}
	}

create_pampd:
	/* look for space via an existing match first */
	pampd = (void *)zbud_match_prep(th, false, cdata, clen);
	if (pampd != NULL)
		goto got_pampd;

	/* no match, now we need to find (or free up) a full page */
	newpage = zcache_alloc_page();
	if (newpage != NULL)
		goto create_in_new_page;
	/*
	 * FIXME do the following only if eph is oversized?
	 * if (zcache_eph_pageframes >
	 * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
	 * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
	 */
	inc_zcache_failed_getfreepages();
	/* can't allocate a page, evict an ephemeral page via LRU */
	newpage = zcache_evict_eph_pageframe();
	if (newpage == NULL) {
		inc_zcache_pers_ate_eph_failed();
		goto out;
	}
	inc_zcache_pers_ate_eph();

create_in_new_page:
	pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
	BUG_ON(pampd == NULL);
	inc_zcache_pers_pageframes();

got_pampd:
	inc_zcache_pers_zpages();
	inc_zcache_pers_zbytes(clen);
	if (ramster_enabled && raw)
		ramster_count_foreign_pages(false, 1);
	if (zero_filled)
		pampd = (void *)ZERO_FILLED;
out:
	return pampd;
}
/*
 * This is called directly from zcache_put_page to pre-allocate space
 * to store a zpage.
 */
void *zcache_pampd_create(char *data, unsigned int size, bool raw,
					int eph, struct tmem_handle *th)
{
	void *pampd = NULL;
	struct zcache_preload *kp;
	struct tmem_objnode *objnode;
	struct tmem_obj *obj;
	int i;

	BUG_ON(!irqs_disabled());
	/* pre-allocate per-cpu metadata */
	BUG_ON(zcache_objnode_cache == NULL);
	BUG_ON(zcache_obj_cache == NULL);
	kp = &__get_cpu_var(zcache_preloads);
	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		objnode = kp->objnodes[i];
		if (objnode == NULL) {
			objnode = kmem_cache_alloc(zcache_objnode_cache,
							ZCACHE_GFP_MASK);
			if (unlikely(objnode == NULL)) {
				inc_zcache_failed_alloc();
				goto out;
			}
			kp->objnodes[i] = objnode;
		}
	}
	if (kp->obj == NULL) {
		obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
		kp->obj = obj;
	}
	if (unlikely(kp->obj == NULL)) {
		inc_zcache_failed_alloc();
		goto out;
	}
	/*
	 * ok, have all the metadata pre-allocated, now do the data
	 * but since how we allocate the data is dependent on ephemeral
	 * or persistent, we split the call here to different sub-functions
	 */
	if (eph)
		pampd = zcache_pampd_eph_create(data, size, raw, th);
	else
		pampd = zcache_pampd_pers_create(data, size, raw, th);
out:
	return pampd;
}
/*
 * This is a pamops called via tmem_put and is necessary to "finish"
 * a pampd creation.
 */
void zcache_pampd_create_finish(void *pampd, bool eph)
{
	if (pampd != (void *)ZERO_FILLED)
		zbud_create_finish((struct zbudref *)pampd, eph);
}
/*
 * This is passed as a function parameter to zbud_decompress so that
 * zbud need not be familiar with the details of crypto.  It assumes that
 * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
 * kmapped.  It must be successful, else there is a logic bug somewhere.
 */
static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
{
	int ret;
	unsigned int outlen = PAGE_SIZE;

	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
				to_va, &outlen);
	BUG_ON(ret);
	BUG_ON(outlen != PAGE_SIZE);
}
/*
 * Decompress from the kernel va to a pageframe
 */
void zcache_decompress_to_page(char *from_va, unsigned int size,
					struct page *to_page)
{
	char *to_va = kmap_atomic(to_page);
	zcache_decompress(from_va, size, to_va);
	kunmap_atomic(to_va);
}
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret;
	bool eph = !is_persistent(pool);

	BUG_ON(preemptible());
	BUG_ON(eph);	/* fix later if shared pools get implemented */
	BUG_ON(pampd_is_remote(pampd));

	if (pampd == (void *)ZERO_FILLED) {
		handle_zero_filled_page(data);
		if (!raw)
			*sizep = PAGE_SIZE;
		return 0;
	}

	if (raw)
		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
						sizep, eph);
	else {
		ret = zbud_decompress((struct page *)(data),
					(struct zbudref *)pampd, false,
					zcache_decompress);
		*sizep = PAGE_SIZE;
	}
	return ret;
}
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret;
	bool eph = !is_persistent(pool), zero_filled = false;
	struct page *page = NULL;
	unsigned int zsize, zpages;

	BUG_ON(preemptible());
	BUG_ON(pampd_is_remote(pampd));

	if (pampd == (void *)ZERO_FILLED) {
		handle_zero_filled_page(data);
		zero_filled = true;
		zsize = 0;
		zpages = 1;
		if (!raw)
			*sizep = PAGE_SIZE;
		ret = 0;
		goto zero_fill;
	}

	if (raw)
		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
						sizep, eph);
	else {
		ret = zbud_decompress((struct page *)(data),
					(struct zbudref *)pampd, eph,
					zcache_decompress);
		*sizep = PAGE_SIZE;
	}
	page = zbud_free_and_delist((struct zbudref *)pampd, eph,
					&zsize, &zpages);
zero_fill:
	if (eph) {
		if (page)
			dec_zcache_eph_pageframes();
		dec_zcache_eph_zpages(zpages);
		dec_zcache_eph_zbytes(zsize);
	} else {
		if (page)
			dec_zcache_pers_pageframes();
		dec_zcache_pers_zpages(zpages);
		dec_zcache_pers_zbytes(zsize);
	}
	if (!is_local_client(pool->client))
		ramster_count_foreign_pages(eph, -1);
	if (page && !zero_filled)
		zcache_free_page(page);
	return ret;
}
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
			      struct tmem_oid *oid, uint32_t index, bool acct)
{
	struct page *page = NULL;
	unsigned int zsize, zpages;
	bool zero_filled = false;

	BUG_ON(preemptible());

	if (pampd == (void *)ZERO_FILLED) {
		zero_filled = true;
		zsize = 0;
		zpages = 1;
	}

	if (pampd_is_remote(pampd) && !zero_filled) {
		BUG_ON(!ramster_enabled);
		pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
		if (pampd == NULL)
			return;
	}
	if (is_ephemeral(pool)) {
		if (!zero_filled)
			page = zbud_free_and_delist((struct zbudref *)pampd,
						true, &zsize, &zpages);
		if (page)
			dec_zcache_eph_pageframes();
		dec_zcache_eph_zpages(zpages);
		dec_zcache_eph_zbytes(zsize);
		/* FIXME CONFIG_RAMSTER... check acct parameter? */
	} else {
		if (!zero_filled)
			page = zbud_free_and_delist((struct zbudref *)pampd,
						false, &zsize, &zpages);
		if (page)
			dec_zcache_pers_pageframes();
		dec_zcache_pers_zpages(zpages);
		dec_zcache_pers_zbytes(zsize);
	}
	if (!is_local_client(pool->client))
		ramster_count_foreign_pages(is_ephemeral(pool), -1);
	if (page && !zero_filled)
		zcache_free_page(page);
}
static struct tmem_pamops zcache_pamops = {
	.create_finish = zcache_pampd_create_finish,
	.get_data = zcache_pampd_get_data,
	.get_data_and_free = zcache_pampd_get_data_and_free,
	.free = zcache_pampd_free,
};
/*
 * zcache compression/decompression and related per-cpu stuff
 */

static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
#define ZCACHE_DSTMEM_ORDER 1

static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
	int ret;
	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
	char *from_va;

	BUG_ON(!irqs_disabled());
	/* no buffer or no compressor so can't compress */
	BUG_ON(dmem == NULL);
	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
	from_va = kmap_atomic(from);
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
				out_len);
	BUG_ON(ret);
	*out_va = dmem;
	kunmap_atomic(from_va);
}
static int zcache_comp_cpu_up(int cpu)
{
	struct crypto_comp *tfm;

	tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
	if (IS_ERR(tfm))
		return NOTIFY_BAD;
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
	return NOTIFY_OK;
}

static void zcache_comp_cpu_down(int cpu)
{
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
	crypto_free_comp(tfm);
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
}
static int zcache_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	int ret, i, cpu = (long)pcpu;
	struct zcache_preload *kp;

	switch (action) {
	case CPU_UP_PREPARE:
		ret = zcache_comp_cpu_up(cpu);
		if (ret != NOTIFY_OK) {
			pr_err("%s: can't allocate compressor xform\n",
				namestr);
			return ret;
		}
		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		zcache_comp_cpu_down(cpu);
		free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
			ZCACHE_DSTMEM_ORDER);
		per_cpu(zcache_dstmem, cpu) = NULL;
		kp = &per_cpu(zcache_preloads, cpu);
		for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
			if (kp->objnodes[i])
				kmem_cache_free(zcache_objnode_cache,
						kp->objnodes[i]);
			kp->objnodes[i] = NULL;
		}
		if (kp->obj) {
			kmem_cache_free(zcache_obj_cache, kp->obj);
			kp->obj = NULL;
		}
		if (ramster_enabled)
			ramster_cpu_down(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block zcache_cpu_notifier_block = {
	.notifier_call = zcache_cpu_notifier
};
/*
 * The following code interacts with the zbud eviction and zbud
 * zombify code to access LRU pages
 */

static struct page *zcache_evict_eph_pageframe(void)
{
	struct page *page;
	unsigned int zsize = 0, zpages = 0;

	page = zbud_evict_pageframe_lru(&zsize, &zpages);
	if (page == NULL)
		goto out;
	dec_zcache_eph_zbytes(zsize);
	dec_zcache_eph_zpages(zpages);
	inc_zcache_evicted_eph_zpages(zpages);
	dec_zcache_eph_pageframes();
	inc_zcache_evicted_eph_pageframes();
out:
	return page;
}
#ifdef CONFIG_ZCACHE_WRITEBACK

static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);

static inline void inc_zcache_outstanding_writeback_pages(void)
{
	zcache_outstanding_writeback_pages =
	    atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
}

static inline void dec_zcache_outstanding_writeback_pages(void)
{
	zcache_outstanding_writeback_pages =
	    atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
}

static void unswiz(struct tmem_oid oid, u32 index,
			unsigned *type, pgoff_t *offset);
/*
 * Choose an LRU persistent pageframe and attempt to write it back to
 * the backing swap disk by calling frontswap_writeback on both zpages.
 *
 * This is work-in-progress.
 */

static void zcache_end_swap_write(struct bio *bio, int err)
{
	end_swap_bio_write(bio, err);
	dec_zcache_outstanding_writeback_pages();
	zcache_writtenback_pages++;
}
/*
 * zcache_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * If success, page is returned in retpage
 * Returns 0 if page was already in the swap cache, page is not locked
 * Returns 1 if the new page needs to be populated, page is locked
 */
static int zcache_get_swap_cache_page(int type, pgoff_t offset,
				struct page *new_page)
{
	struct page *found_page;
	swp_entry_t entry = swp_entry(type, offset);
	int err;

	BUG_ON(new_page == NULL);
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			return 0;

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			return 1;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		/* FIXME: is it possible to get here without err==-ENOMEM?
		 * If not, we can dispense with the do loop, use goto retry */
	} while (err != -ENOMEM);

	return 0;
}
/*
 * Given a frontswap zpage in zcache (identified by type/offset) and
 * an empty page, put the page into the swap cache, use frontswap
 * to get the page from zcache into the empty page, then give it
 * to the swap subsystem to send to disk (carefully avoiding the
 * possibility that frontswap might snatch it back).
 * Returns < 0 if error, 0 if successful, and 1 if successful but
 * the newpage passed in not needed and should be freed.
 */
static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
					struct page *newpage)
{
	struct page *page = newpage;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	ret = zcache_get_swap_cache_page(type, offset, page);
	if (ret < 0)
		return ret;
	else if (ret == 0) {
		/* more uptodate page is already in swapcache */
		__frontswap_invalidate_page(type, offset);
		return 1;
	}

	BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
	/* FIXME: how is it possible to get here when page is unlocked? */
	__frontswap_load(page);
	SetPageUptodate(page);  /* above does SetPageDirty, is that enough? */

	/* start writeback */
	SetPageReclaim(page);
	/*
	 * Return value is ignored here because it doesn't change anything
	 * for us.  Page is returned unlocked.
	 */
	(void)__swap_writepage(page, &wbc, zcache_end_swap_write);
	page_cache_release(page);
	inc_zcache_outstanding_writeback_pages();

	return 0;
}
/*
 * The following is still a magic number... we want to allow forward progress
 * for writeback because it clears out needed RAM when under pressure, but
 * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
 * pages if the swap device is very slow.
 */
#define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
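/*
 * Illustrative arithmetic (added, assuming 4KB pages): a cap of 6400
 * outstanding writeback pages bounds the memory queued for writeback to
 * a slow swap device at roughly 25MB at any one time.
 */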
/*
 * Try to allocate two free pages, first using a non-aggressive alloc,
 * then by evicting zcache ephemeral (clean pagecache) pages, and last
 * by aggressive GFP_KERNEL alloc.  We allow zbud to choose a pageframe
 * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
 * function above for each.
 */
static int zcache_frontswap_writeback(void)
{
	struct tmem_handle th[2];
	int ret = 0;
	int nzbuds, writeback_ret;
	unsigned type;
	struct page *znewpage1 = NULL, *znewpage2 = NULL;
	struct page *evictpage1 = NULL, *evictpage2 = NULL;
	struct page *newpage1 = NULL, *newpage2 = NULL;
	struct page *page1 = NULL, *page2 = NULL;
	pgoff_t offset;

	znewpage1 = alloc_page(ZCACHE_GFP_MASK);
	znewpage2 = alloc_page(ZCACHE_GFP_MASK);
	if (znewpage1 == NULL)
		evictpage1 = zcache_evict_eph_pageframe();
	if (znewpage2 == NULL)
		evictpage2 = zcache_evict_eph_pageframe();

	if ((evictpage1 == NULL || evictpage2 == NULL) &&
	    atomic_read(&zcache_outstanding_writeback_pages_atomic) >
				ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
		ret = -ENOMEM;
		goto out;
	}
	if (znewpage1 == NULL && evictpage1 == NULL)
		newpage1 = alloc_page(GFP_KERNEL);
	if (znewpage2 == NULL && evictpage2 == NULL)
		newpage2 = alloc_page(GFP_KERNEL);
	if (newpage1 == NULL || newpage2 == NULL)
		goto out;

	/* ok, we have two pageframes pre-allocated, get a pair of zbuds */
	nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
	if (nzbuds == 0) {
		ret = -ENOENT;
		goto out;
	}

	/* process the first zbud */
	unswiz(th[0].oid, th[0].index, &type, &offset);
	page1 = (znewpage1 != NULL) ? znewpage1 :
			((newpage1 != NULL) ? newpage1 : evictpage1);
	writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
	if (writeback_ret < 0) {
		ret = -ENOMEM;
		goto out;
	}
	if (evictpage1 != NULL)
		zcache_pageframes_freed =
			atomic_inc_return(&zcache_pageframes_freed_atomic);
	if (writeback_ret == 0) {
		/* zcache_get_swap_cache_page will free, don't double free */
		znewpage1 = NULL;
		newpage1 = NULL;
		evictpage1 = NULL;
	}
	if (nzbuds < 2)
		goto out;

	/* if there is a second zbud, process it */
	unswiz(th[1].oid, th[1].index, &type, &offset);
	page2 = (znewpage2 != NULL) ? znewpage2 :
			((newpage2 != NULL) ? newpage2 : evictpage2);
	writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
	if (writeback_ret < 0) {
		ret = -ENOMEM;
		goto out;
	}
	if (evictpage2 != NULL)
		zcache_pageframes_freed =
			atomic_inc_return(&zcache_pageframes_freed_atomic);
	if (writeback_ret == 0) {
		/* zcache_get_swap_cache_page will free, don't double free */
		znewpage2 = NULL;
		newpage2 = NULL;
		evictpage2 = NULL;
	}

out:
	if (znewpage1 != NULL)
		page_cache_release(znewpage1);
	if (znewpage2 != NULL)
		page_cache_release(znewpage2);
	if (newpage1 != NULL)
		page_cache_release(newpage1);
	if (newpage2 != NULL)
		page_cache_release(newpage2);
	if (evictpage1 != NULL)
		zcache_free_page(evictpage1);
	if (evictpage2 != NULL)
		zcache_free_page(evictpage2);
	return ret;
}

#endif /* CONFIG_ZCACHE_WRITEBACK */
/*
 * When zcache is disabled ("frozen"), pools can be created and destroyed,
 * but all puts (and thus all other operations that require memory allocation)
 * must fail.  If zcache is unfrozen, accepts puts, then frozen again,
 * data consistency requires all puts while frozen to be converted into
 * flushes.
 */
static bool zcache_freeze;
/*
 * This zcache shrinker interface reduces the number of ephemeral pageframes
 * used by zcache to approximately the same as the total number of LRU_FILE
 * pageframes in use, and now also reduces the number of persistent pageframes
 * used by zcache to approximately the same as the total number of LRU_ANON
 * pageframes in use.  FIXME POLICY: Probably the writeback should only occur
 * if the eviction doesn't free enough pages.
 */
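/*
 * Worked example (added for illustration): if zcache currently holds
 * 10000 ephemeral pageframes while the file LRUs (active + inactive)
 * total 8000 pageframes, the shrinker below evicts roughly
 * 10000 - 8000 = 2000 ephemeral pageframes; the persistent/anon pair is
 * balanced the same way, using writeback instead of eviction.
 */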
static int shrink_zcache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	static bool in_progress;
	int ret = -1;
	int nr = sc->nr_to_scan;
	int nr_evict = 0;
	int nr_writeback = 0;
	struct page *page;
	int file_pageframes_inuse, anon_pageframes_inuse;

	if (nr <= 0)
		goto skip_evict;

	/* don't allow more than one eviction thread at a time */
	if (in_progress)
		goto skip_evict;

	in_progress = true;

	/* we are going to ignore nr, and target a different value */
	zcache_last_active_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	zcache_last_inactive_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	file_pageframes_inuse = zcache_last_active_file_pageframes +
				zcache_last_inactive_file_pageframes;
	if (zcache_eph_pageframes > file_pageframes_inuse)
		nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
	while (nr_evict-- > 0) {
		page = zcache_evict_eph_pageframe();
		if (page == NULL)
			break;
		zcache_free_page(page);
	}

	zcache_last_active_anon_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
	zcache_last_inactive_anon_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
	anon_pageframes_inuse = zcache_last_active_anon_pageframes +
				zcache_last_inactive_anon_pageframes;
	if (zcache_pers_pageframes > anon_pageframes_inuse)
		nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
	while (nr_writeback-- > 0) {
#ifdef CONFIG_ZCACHE_WRITEBACK
		int writeback_ret;

		writeback_ret = zcache_frontswap_writeback();
		if (writeback_ret == -ENOMEM)
#endif
			break;
	}
	in_progress = false;

skip_evict:
	/* resample: has changed, but maybe not all the way yet */
	zcache_last_active_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	zcache_last_inactive_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
		zcache_last_inactive_file_pageframes;
	if (ret < 0)
		ret = 0;
	return ret;
}
static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
/*
 * zcache shims between cleancache/frontswap ops and tmem
 */

/* FIXME rename these core routines to zcache_tmemput etc? */
int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
				uint32_t index, void *page,
				unsigned int size, bool raw, int ephemeral)
{
	struct tmem_pool *pool;
	struct tmem_handle th;
	int ret = -1;
	void *pampd = NULL;

	BUG_ON(!irqs_disabled());
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (unlikely(pool == NULL))
		goto out;
	if (!zcache_freeze) {
		ret = 0;
		th.client_id = cli_id;
		th.pool_id = pool_id;
		th.oid = *oidp;
		th.index = index;
		pampd = zcache_pampd_create((char *)page, size, raw,
				ephemeral, &th);
		if (pampd == NULL) {
			ret = -ENOMEM;
			if (ephemeral)
				inc_zcache_failed_eph_puts();
			else
				inc_zcache_failed_pers_puts();
			goto out;
		}
		if (ramster_enabled)
			ramster_do_preload_flnode(pool);
		ret = tmem_put(pool, oidp, index, 0, pampd);
		zcache_put_pool(pool);
	} else {
		inc_zcache_put_to_flush();
		if (ramster_enabled)
			ramster_do_preload_flnode(pool);
		if (atomic_read(&pool->obj_count) > 0)
			/* the put fails whether the flush succeeds or not */
			(void)tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
out:
	return ret;
}
int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
				uint32_t index, void *page,
				size_t *sizep, bool raw, int get_and_free)
{
	struct tmem_pool *pool;
	int ret = -1;
	bool eph;

	if (!raw) {
		BUG_ON(irqs_disabled());
		BUG_ON(in_softirq());
	}
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	eph = is_ephemeral(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_get(pool, oidp, index, (char *)(page),
					sizep, raw, get_and_free);
		zcache_put_pool(pool);
	}
	WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
			"zcache_get fails on persistent pool, "
			"bad things are very likely to happen soon\n");
#ifdef RAMSTER_TESTING
	if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
		pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
#endif
	return ret;
}
int zcache_flush_page(int cli_id, int pool_id,
				struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	inc_zcache_flush_total();
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (ramster_enabled)
		ramster_do_preload_flnode(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		inc_zcache_flush_found();
	local_irq_restore(flags);
	return ret;
}
int zcache_flush_object(int cli_id, int pool_id,
				struct tmem_oid *oidp)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	inc_zcache_flobj_total();
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (ramster_enabled)
		ramster_do_preload_flnode(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_object(pool, oidp);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		inc_zcache_flobj_found();
	local_irq_restore(flags);
	return ret;
}
static int zcache_client_destroy_pool(int cli_id, int pool_id)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;
	int ret = -1;

	if (pool_id < 0)
		goto out;
	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool == NULL)
		goto out;
	cli->tmem_pools[pool_id] = NULL;
	/* wait for pool activity on other cpus to quiesce */
	while (atomic_read(&pool->refcount) != 0)
		;
	atomic_dec(&cli->refcount);
	ret = tmem_destroy_pool(pool);
	kfree(pool);
	if (cli_id == LOCAL_CLIENT)
		pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
	else
		pr_info("%s: destroyed pool id=%d, client=%d\n",
				namestr, pool_id, cli_id);
out:
	return ret;
}
int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
	int poolid = -1;
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
	if (pool == NULL)
		goto out;

	for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
		if (cli->tmem_pools[poolid] == NULL)
			break;
	if (poolid >= MAX_POOLS_PER_CLIENT) {
		pr_info("%s: pool creation failed: max exceeded\n", namestr);
		kfree(pool);
		poolid = -1;
		goto out;
	}
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = poolid;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[poolid] = pool;
	if (cli_id == LOCAL_CLIENT)
		pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
			flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
			poolid);
	else
		pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
			flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
			poolid, cli_id);
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return poolid;
}

static int zcache_local_new_pool(uint32_t flags)
{
	return zcache_new_pool(LOCAL_CLIENT, flags);
}
int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
{
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;
	uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
	int ret = -1;

	BUG_ON(!ramster_enabled);
	if (cli_id == LOCAL_CLIENT)
		goto out;
	if (pool_id >= MAX_POOLS_PER_CLIENT)
		goto out;
	if (cli_id >= MAX_CLIENTS)
		goto out;

	cli = &zcache_clients[cli_id];
	if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
		pr_err("zcache_autocreate_pool: pool type disabled\n");
		goto out;
	}
	if (!cli->allocated) {
		if (zcache_new_client(cli_id)) {
			pr_err("zcache_autocreate_pool: can't create client\n");
			goto out;
		}
		cli = &zcache_clients[cli_id];
	}
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool != NULL) {
		if (pool->persistent && eph) {
			pr_err("zcache_autocreate_pool: type mismatch\n");
			goto out;
		}
		ret = 0;
		goto out;
	}
	pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
	if (pool == NULL)
		goto out;
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = pool_id;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[pool_id] = pool;
	pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
		namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
		pool_id, cli_id);
	ret = 0;
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return ret;
}
/*
 * Two kernel functionalities currently can be layered on top of tmem.
 * These are "cleancache" which is used as a second-chance cache for clean
 * page cache pages; and "frontswap" which is used for swap pages
 * to avoid writes to disk.  A generic "shim" is provided here for each
 * to translate in-kernel semantics to zcache semantics.
 */
static void zcache_cleancache_put_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
		inc_zcache_eph_nonactive_puts_ignored();
		return;
	}
	if (likely(ind == index))
		(void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
					page, PAGE_SIZE, false, 1);
}
static int zcache_cleancache_get_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	size_t size;
	int ret = -1;

	if (likely(ind == index)) {
		ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
					page, &size, false, 0);
		BUG_ON(ret >= 0 && size != PAGE_SIZE);
		if (ret == 0)
			SetPageWasActive(page);
	}
	return ret;
}
static void zcache_cleancache_flush_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (likely(ind == index))
		(void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}
static void zcache_cleancache_flush_inode(int pool_id,
					struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	(void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
}
static void zcache_cleancache_flush_fs(int pool_id)
{
	if (pool_id >= 0)
		(void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
}
static int zcache_cleancache_init_fs(size_t pagesize)
{
	BUG_ON(sizeof(struct cleancache_filekey) !=
				sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_local_new_pool(0);
}

static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	/* shared pools are unsupported and map to private */
	BUG_ON(sizeof(struct cleancache_filekey) !=
				sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_local_new_pool(0);
}
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.invalidate_page = zcache_cleancache_flush_page,
	.invalidate_inode = zcache_cleancache_flush_inode,
	.invalidate_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};
zcache_cleancache_register_ops(void)
1577 struct cleancache_ops old_ops
=
1578 cleancache_register_ops(&zcache_cleancache_ops
);
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid __read_mostly = -1;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
 * frontswap_get_page(), but has side-effects.  Hence using 8.
 */
#define SWIZ_BITS		8
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
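/*
 * Worked example (added for illustration, SWIZ_BITS == 8): for swap type 2
 * and page offset 0x12345, _oswiz(2, 0x12345) == (2 << 8) | 0x45 == 0x245,
 * which becomes oid.oid[0], while iswiz(0x12345) == 0x123 becomes the tmem
 * index; unswiz() below recombines them into (type 2, offset 0x12345).
 */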
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
#ifdef CONFIG_ZCACHE_WRITEBACK
static void unswiz(struct tmem_oid oid, u32 index,
			unsigned *type, pgoff_t *offset)
{
	*type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
	*offset = (pgoff_t)((index << SWIZ_BITS) |
			(oid.oid[0] & SWIZ_MASK));
}
#endif
static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	int ret = -1;
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
		inc_zcache_pers_nonactive_puts_ignored();
		goto out;
	}
	if (likely(ind64 == ind)) {
		local_irq_save(flags);
		ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind),
					page, PAGE_SIZE, false, 0);
		local_irq_restore(flags);
	}
out:
	return ret;
}
/* returns 0 if the page was successfully gotten from frontswap, -1 if
 * was not present (should never happen!) */
static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	size_t size;
	int ret = -1, get_and_free;

	if (frontswap_has_exclusive_gets)
		get_and_free = 1;
	else
		get_and_free = -1;
	BUG_ON(!PageLocked(page));
	if (likely(ind64 == ind)) {
		ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind),
					page, &size, false, get_and_free);
		BUG_ON(ret >= 0 && size != PAGE_SIZE);
	}
	return ret;
}
/* flush a single page from frontswap */
static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);

	if (likely(ind64 == ind))
		(void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void zcache_frontswap_flush_area(unsigned type)
{
	struct tmem_oid oid;
	int ind;

	for (ind = SWIZ_MASK; ind >= 0; ind--) {
		oid = oswiz(type, ind);
		(void)zcache_flush_object(LOCAL_CLIENT,
						zcache_frontswap_poolid, &oid);
	}
}
)
1692 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1693 if (zcache_frontswap_poolid
< 0)
1694 zcache_frontswap_poolid
=
1695 zcache_local_new_pool(TMEM_POOL_PERSIST
);
static struct frontswap_ops zcache_frontswap_ops = {
	.store = zcache_frontswap_put_page,
	.load = zcache_frontswap_get_page,
	.invalidate_page = zcache_frontswap_flush_page,
	.invalidate_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};
zcache_frontswap_register_ops(void)
1708 struct frontswap_ops old_ops
=
1709 frontswap_register_ops(&zcache_frontswap_ops
);
/*
 * zcache initialization
 * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
 * OR NOTHING HAPPENS!
 */

static int __init enable_zcache(char *s)
{
	zcache_enabled = true;
	return 1;
}
__setup("zcache", enable_zcache);
static int __init enable_ramster(char *s)
{
	zcache_enabled = true;
#ifdef CONFIG_RAMSTER
	ramster_enabled = true;
#endif
	return 1;
}
__setup("ramster", enable_ramster);
/* allow independent dynamic disabling of cleancache and frontswap */

static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);

static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);

static int __init no_frontswap_exclusive_gets(char *s)
{
	frontswap_has_exclusive_gets = false;
	return 1;
}
__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);

static int __init no_frontswap_ignore_nonactive(char *s)
{
	disable_frontswap_ignore_nonactive = true;
	return 1;
}
__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);

static int __init no_cleancache_ignore_nonactive(char *s)
{
	disable_cleancache_ignore_nonactive = true;
	return 1;
}
__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
static int __init enable_zcache_compressor(char *s)
{
	strlcpy(zcache_comp_name, s, sizeof(zcache_comp_name));
	zcache_enabled = true;
	return 1;
}
__setup("zcache=", enable_zcache_compressor);
static int __init zcache_comp_init(void)
{
	int ret = 0;

	/* check crypto algorithm */
	if (*zcache_comp_name != '\0') {
		ret = crypto_has_comp(zcache_comp_name, 0, 0);
		if (!ret)
			pr_info("zcache: %s not supported\n",
					zcache_comp_name);
	}
	if (!ret)
		strcpy(zcache_comp_name, "lzo");
	ret = crypto_has_comp(zcache_comp_name, 0, 0);
	if (!ret) {
		ret = 1;
		goto out;
	}
	pr_info("zcache: using %s compressor\n", zcache_comp_name);

	/* alloc percpu transforms */
	ret = 0;
	zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zcache_comp_pcpu_tfms)
		ret = 1;
out:
	return ret;
}
static int __init zcache_init(void)
{
	int ret = 0;

	if (ramster_enabled) {
		namestr = "ramster";
		ramster_register_pamops(&zcache_pamops);
	}
	zcache_debugfs_init();
	if (zcache_enabled) {
		unsigned int cpu;

		tmem_register_hostops(&zcache_hostops);
		tmem_register_pamops(&zcache_pamops);
		ret = register_cpu_notifier(&zcache_cpu_notifier_block);
		if (ret) {
			pr_err("%s: can't register cpu notifier\n", namestr);
			goto out;
		}
		ret = zcache_comp_init();
		if (ret) {
			pr_err("%s: compressor initialization failed\n",
				namestr);
			goto out;
		}
		for_each_online_cpu(cpu) {
			void *pcpu = (void *)(long)cpu;
			zcache_cpu_notifier(&zcache_cpu_notifier_block,
				CPU_UP_PREPARE, pcpu);
		}
	}
	zcache_objnode_cache = kmem_cache_create("zcache_objnode",
				sizeof(struct tmem_objnode), 0, 0, NULL);
	zcache_obj_cache = kmem_cache_create("zcache_obj",
				sizeof(struct tmem_obj), 0, 0, NULL);
	ret = zcache_new_client(LOCAL_CLIENT);
	if (ret) {
		pr_err("%s: can't create client\n", namestr);
		goto out;
	}
	if (zcache_enabled && !disable_cleancache) {
		struct cleancache_ops old_ops;

		register_shrinker(&zcache_shrinker);
		old_ops = zcache_cleancache_register_ops();
		pr_info("%s: cleancache enabled using kernel transcendent "
			"memory and compression buddies\n", namestr);
#ifdef CONFIG_ZCACHE_DEBUG
		pr_info("%s: cleancache: ignorenonactive = %d\n",
			namestr, !disable_cleancache_ignore_nonactive);
#endif
		if (old_ops.init_fs != NULL)
			pr_warn("%s: cleancache_ops overridden\n", namestr);
	}
	if (zcache_enabled && !disable_frontswap) {
		struct frontswap_ops old_ops;

		old_ops = zcache_frontswap_register_ops();
		if (frontswap_has_exclusive_gets)
			frontswap_tmem_exclusive_gets(true);
		pr_info("%s: frontswap enabled using kernel transcendent "
			"memory and compression buddies\n", namestr);
#ifdef CONFIG_ZCACHE_DEBUG
		pr_info("%s: frontswap: excl gets = %d active only = %d\n",
			namestr, frontswap_has_exclusive_gets,
			!disable_frontswap_ignore_nonactive);
#endif
		if (old_ops.init != NULL)
			pr_warn("%s: frontswap_ops overridden\n", namestr);
	}
	if (ramster_enabled)
		ramster_init(!disable_cleancache, !disable_frontswap,
				frontswap_has_exclusive_gets);
out:
	return ret;
}

late_initcall(zcache_init);