/*
 * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
 * Copyright (c) 2010,2011, Nitin Gupta
 *
 * Zcache provides an in-kernel "host implementation" for transcendent memory
 * ("tmem") and, thus indirectly, for cleancache and frontswap.  Zcache uses
 * lzo1x compression to improve density and an embedded allocator called
 * "zbud" which "buddies" two compressed pages semi-optimally in each physical
 * pageframe.  Zbud is integrally tied into tmem to allow pageframes to
 * be "reclaimed" efficiently.
 */
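/*
 * A minimal sketch of what "buddying" buys (a hypothetical helper, not
 * part of the real zbud interface): two compressed pages can share one
 * physical pageframe whenever their compressed sizes, plus zbud's
 * per-page bookkeeping overhead, fit within PAGE_SIZE:
 *
 *	static bool zbud_pair_fits(unsigned int zsize1, unsigned int zsize2,
 *				   unsigned int overhead)
 *	{
 *		return zsize1 + zsize2 + overhead <= PAGE_SIZE;
 *	}
 */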
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
#include <linux/crypto.h>

#include <linux/cleancache.h>
#include <linux/frontswap.h>
#ifdef CONFIG_RAMSTER
static int ramster_enabled;
#else
#define ramster_enabled 0
#endif

#ifndef __PG_WAS_ACTIVE
static inline bool PageWasActive(struct page *page)
{
	return true;
}

static inline void SetPageWasActive(struct page *page)
{
}
#endif

#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
static bool frontswap_has_exclusive_gets __read_mostly = true;
#else
static bool frontswap_has_exclusive_gets __read_mostly;
static inline void frontswap_tmem_exclusive_gets(bool b)
{
}
#endif

static int zcache_enabled __read_mostly;
static int disable_cleancache __read_mostly;
static int disable_frontswap __read_mostly;
static int disable_frontswap_ignore_nonactive __read_mostly;
static int disable_cleancache_ignore_nonactive __read_mostly;
static char *namestr __read_mostly = "zcache";
#define ZCACHE_GFP_MASK \
	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)

MODULE_LICENSE("GPL");

/* crypto API for zcache */
#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;

enum comp_op {
	ZCACHE_COMPOP_COMPRESS,
	ZCACHE_COMPOP_DECOMPRESS
};
static inline int zcache_comp_op(enum comp_op op,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret = -1;

	BUG_ON(!zcache_comp_pcpu_tfms);
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		break;
	}
	put_cpu();
	return ret;
}
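/*
 * Illustrative use of the helper above (a sketch; src_va and dst_buf are
 * the caller's buffers, and real callers are zcache_compress() and
 * zcache_decompress() below).  The caller passes the destination capacity
 * in *dlen, which crypto_comp_compress() overwrites with the actual
 * compressed length:
 *
 *	unsigned int dlen = PAGE_SIZE;
 *	int err = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, src_va, PAGE_SIZE,
 *				 dst_buf, &dlen);
 */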
/*
 * byte count defining poor compression; pages with greater zsize will be
 * rejected
 */
static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
/*
 * byte count defining poor *mean* compression; pages with greater zsize
 * will be rejected until sufficient better-compressed pages are accepted
 * driving the mean below this threshold
 */
static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
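/*
 * Worked example, assuming the common PAGE_SIZE of 4096: zbud_max_zsize =
 * (4096 / 8) * 7 = 3584 bytes, so a page compressing to more than 7/8 of
 * a page is rejected outright; zbud_max_mean_zsize = (4096 / 8) * 5 =
 * 2560 bytes, so further poorly-compressed pages are rejected whenever
 * the running mean compressed size exceeds 5/8 of a page.
 */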
/*
 * for now, use named slabs so we can easily track usage; later we can
 * either just use kmalloc, or perhaps add a slab-like allocator
 * to more carefully manage total memory utilization
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;

static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
/* we try to keep these statistics SMP-consistent */
static long zcache_obj_count;
static atomic_t zcache_obj_atomic = ATOMIC_INIT(0);
static long zcache_obj_count_max;
static long zcache_objnode_count;
static atomic_t zcache_objnode_atomic = ATOMIC_INIT(0);
static long zcache_objnode_count_max;
static u64 zcache_eph_zbytes;
static atomic_long_t zcache_eph_zbytes_atomic = ATOMIC_INIT(0);
static u64 zcache_eph_zbytes_max;
static u64 zcache_pers_zbytes;
static atomic_long_t zcache_pers_zbytes_atomic = ATOMIC_INIT(0);
static u64 zcache_pers_zbytes_max;
static long zcache_eph_pageframes;
static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
static long zcache_eph_pageframes_max;
static long zcache_pers_pageframes;
static atomic_t zcache_pers_pageframes_atomic = ATOMIC_INIT(0);
static long zcache_pers_pageframes_max;
static long zcache_pageframes_alloced;
static atomic_t zcache_pageframes_alloced_atomic = ATOMIC_INIT(0);
static long zcache_pageframes_freed;
static atomic_t zcache_pageframes_freed_atomic = ATOMIC_INIT(0);
static long zcache_eph_zpages;
static atomic_t zcache_eph_zpages_atomic = ATOMIC_INIT(0);
static long zcache_eph_zpages_max;
static long zcache_pers_zpages;
static atomic_t zcache_pers_zpages_atomic = ATOMIC_INIT(0);
static long zcache_pers_zpages_max;
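/*
 * The idiom used for the counters above: each "long" shadows an atomic_t
 * twin; updates go through the atomic op and its return value is mirrored
 * into the long, which debugfs then exports directly, e.g.:
 *
 *	zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
 *	if (zcache_obj_count > zcache_obj_count_max)
 *		zcache_obj_count_max = zcache_obj_count;
 */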
/* but for the rest of these, counting races are ok */
static unsigned long zcache_flush_total;
static unsigned long zcache_flush_found;
static unsigned long zcache_flobj_total;
static unsigned long zcache_flobj_found;
static unsigned long zcache_failed_eph_puts;
static unsigned long zcache_failed_pers_puts;
static unsigned long zcache_failed_getfreepages;
static unsigned long zcache_failed_alloc;
static unsigned long zcache_put_to_flush;
static unsigned long zcache_compress_poor;
static unsigned long zcache_mean_compress_poor;
static unsigned long zcache_eph_ate_tail;
static unsigned long zcache_eph_ate_tail_failed;
static unsigned long zcache_pers_ate_eph;
static unsigned long zcache_pers_ate_eph_failed;
static unsigned long zcache_evicted_eph_zpages;
static unsigned long zcache_evicted_eph_pageframes;
static unsigned long zcache_last_active_file_pageframes;
static unsigned long zcache_last_inactive_file_pageframes;
static unsigned long zcache_last_active_anon_pageframes;
static unsigned long zcache_last_inactive_anon_pageframes;
static unsigned long zcache_eph_nonactive_puts_ignored;
static unsigned long zcache_pers_nonactive_puts_ignored;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#define	zdfs	debugfs_create_size_t
#define zdfs64	debugfs_create_u64
static int zcache_debugfs_init(void)
{
	struct dentry *root = debugfs_create_dir("zcache", NULL);

	if (root == NULL)
		return -ENXIO;

	zdfs("obj_count", S_IRUGO, root, &zcache_obj_count);
	zdfs("obj_count_max", S_IRUGO, root, &zcache_obj_count_max);
	zdfs("objnode_count", S_IRUGO, root, &zcache_objnode_count);
	zdfs("objnode_count_max", S_IRUGO, root, &zcache_objnode_count_max);
	zdfs("flush_total", S_IRUGO, root, &zcache_flush_total);
	zdfs("flush_found", S_IRUGO, root, &zcache_flush_found);
	zdfs("flobj_total", S_IRUGO, root, &zcache_flobj_total);
	zdfs("flobj_found", S_IRUGO, root, &zcache_flobj_found);
	zdfs("failed_eph_puts", S_IRUGO, root, &zcache_failed_eph_puts);
	zdfs("failed_pers_puts", S_IRUGO, root, &zcache_failed_pers_puts);
	zdfs("failed_get_free_pages", S_IRUGO, root,
				&zcache_failed_getfreepages);
	zdfs("failed_alloc", S_IRUGO, root, &zcache_failed_alloc);
	zdfs("put_to_flush", S_IRUGO, root, &zcache_put_to_flush);
	zdfs("compress_poor", S_IRUGO, root, &zcache_compress_poor);
	zdfs("mean_compress_poor", S_IRUGO, root, &zcache_mean_compress_poor);
	zdfs("eph_ate_tail", S_IRUGO, root, &zcache_eph_ate_tail);
	zdfs("eph_ate_tail_failed", S_IRUGO, root, &zcache_eph_ate_tail_failed);
	zdfs("pers_ate_eph", S_IRUGO, root, &zcache_pers_ate_eph);
	zdfs("pers_ate_eph_failed", S_IRUGO, root, &zcache_pers_ate_eph_failed);
	zdfs("evicted_eph_zpages", S_IRUGO, root, &zcache_evicted_eph_zpages);
	zdfs("evicted_eph_pageframes", S_IRUGO, root,
				&zcache_evicted_eph_pageframes);
	zdfs("eph_pageframes", S_IRUGO, root, &zcache_eph_pageframes);
	zdfs("eph_pageframes_max", S_IRUGO, root, &zcache_eph_pageframes_max);
	zdfs("pers_pageframes", S_IRUGO, root, &zcache_pers_pageframes);
	zdfs("pers_pageframes_max", S_IRUGO, root, &zcache_pers_pageframes_max);
	zdfs("eph_zpages", S_IRUGO, root, &zcache_eph_zpages);
	zdfs("eph_zpages_max", S_IRUGO, root, &zcache_eph_zpages_max);
	zdfs("pers_zpages", S_IRUGO, root, &zcache_pers_zpages);
	zdfs("pers_zpages_max", S_IRUGO, root, &zcache_pers_zpages_max);
	zdfs("last_active_file_pageframes", S_IRUGO, root,
				&zcache_last_active_file_pageframes);
	zdfs("last_inactive_file_pageframes", S_IRUGO, root,
				&zcache_last_inactive_file_pageframes);
	zdfs("last_active_anon_pageframes", S_IRUGO, root,
				&zcache_last_active_anon_pageframes);
	zdfs("last_inactive_anon_pageframes", S_IRUGO, root,
				&zcache_last_inactive_anon_pageframes);
	zdfs("eph_nonactive_puts_ignored", S_IRUGO, root,
				&zcache_eph_nonactive_puts_ignored);
	zdfs("pers_nonactive_puts_ignored", S_IRUGO, root,
				&zcache_pers_nonactive_puts_ignored);
	zdfs64("eph_zbytes", S_IRUGO, root, &zcache_eph_zbytes);
	zdfs64("eph_zbytes_max", S_IRUGO, root, &zcache_eph_zbytes_max);
	zdfs64("pers_zbytes", S_IRUGO, root, &zcache_pers_zbytes);
	zdfs64("pers_zbytes_max", S_IRUGO, root, &zcache_pers_zbytes_max);
	return 0;
}
#undef	zdfs
#undef	zdfs64
#endif
/* developers can call this in case of ooms, e.g. to find memory leaks */
void zcache_dump(void)
{
	pr_info("zcache: obj_count=%lu\n", zcache_obj_count);
	pr_info("zcache: obj_count_max=%lu\n", zcache_obj_count_max);
	pr_info("zcache: objnode_count=%lu\n", zcache_objnode_count);
	pr_info("zcache: objnode_count_max=%lu\n", zcache_objnode_count_max);
	pr_info("zcache: flush_total=%lu\n", zcache_flush_total);
	pr_info("zcache: flush_found=%lu\n", zcache_flush_found);
	pr_info("zcache: flobj_total=%lu\n", zcache_flobj_total);
	pr_info("zcache: flobj_found=%lu\n", zcache_flobj_found);
	pr_info("zcache: failed_eph_puts=%lu\n", zcache_failed_eph_puts);
	pr_info("zcache: failed_pers_puts=%lu\n", zcache_failed_pers_puts);
	pr_info("zcache: failed_get_free_pages=%lu\n",
				zcache_failed_getfreepages);
	pr_info("zcache: failed_alloc=%lu\n", zcache_failed_alloc);
	pr_info("zcache: put_to_flush=%lu\n", zcache_put_to_flush);
	pr_info("zcache: compress_poor=%lu\n", zcache_compress_poor);
	pr_info("zcache: mean_compress_poor=%lu\n",
				zcache_mean_compress_poor);
	pr_info("zcache: eph_ate_tail=%lu\n", zcache_eph_ate_tail);
	pr_info("zcache: eph_ate_tail_failed=%lu\n",
				zcache_eph_ate_tail_failed);
	pr_info("zcache: pers_ate_eph=%lu\n", zcache_pers_ate_eph);
	pr_info("zcache: pers_ate_eph_failed=%lu\n",
				zcache_pers_ate_eph_failed);
	pr_info("zcache: evicted_eph_zpages=%lu\n", zcache_evicted_eph_zpages);
	pr_info("zcache: evicted_eph_pageframes=%lu\n",
				zcache_evicted_eph_pageframes);
	pr_info("zcache: eph_pageframes=%lu\n", zcache_eph_pageframes);
	pr_info("zcache: eph_pageframes_max=%lu\n", zcache_eph_pageframes_max);
	pr_info("zcache: pers_pageframes=%lu\n", zcache_pers_pageframes);
	pr_info("zcache: pers_pageframes_max=%lu\n",
				zcache_pers_pageframes_max);
	pr_info("zcache: eph_zpages=%lu\n", zcache_eph_zpages);
	pr_info("zcache: eph_zpages_max=%lu\n", zcache_eph_zpages_max);
	pr_info("zcache: pers_zpages=%lu\n", zcache_pers_zpages);
	pr_info("zcache: pers_zpages_max=%lu\n", zcache_pers_zpages_max);
	pr_info("zcache: eph_zbytes=%llu\n",
				(unsigned long long)zcache_eph_zbytes);
	pr_info("zcache: eph_zbytes_max=%llu\n",
				(unsigned long long)zcache_eph_zbytes_max);
	pr_info("zcache: pers_zbytes=%llu\n",
				(unsigned long long)zcache_pers_zbytes);
	pr_info("zcache: pers_zbytes_max=%llu\n",
				(unsigned long long)zcache_pers_zbytes_max);
}
/*
 * zcache core code starts here
 */

static struct zcache_client zcache_host;
static struct zcache_client zcache_clients[MAX_CLIENTS];

static inline bool is_local_client(struct zcache_client *cli)
{
	return cli == &zcache_host;
}

static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
{
	struct zcache_client *cli = &zcache_host;

	if (cli_id != LOCAL_CLIENT) {
		if (cli_id >= MAX_CLIENTS)
			goto out;
		cli = &zcache_clients[cli_id];
	}
out:
	return cli;
}

/*
 * Tmem operations assume the poolid implies the invoking client.
 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
 * RAMster has each client numbered by cluster node, and a KVM version
 * of zcache would have one client per guest and each client might
 * have multiple pools.
 */
struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;

	cli = zcache_get_client_by_id(cli_id);
	if (cli == NULL)
		goto out;
	if (!is_local_client(cli))
		atomic_inc(&cli->refcount);
	if (poolid < MAX_POOLS_PER_CLIENT) {
		pool = cli->tmem_pools[poolid];
		if (pool != NULL)
			atomic_inc(&pool->refcount);
	}
out:
	return pool;
}

void zcache_put_pool(struct tmem_pool *pool)
{
	struct zcache_client *cli = NULL;

	if (pool == NULL)
		BUG();
	cli = pool->client;
	atomic_dec(&pool->refcount);
	if (!is_local_client(cli))
		atomic_dec(&cli->refcount);
}

int zcache_new_client(uint16_t cli_id)
{
	struct zcache_client *cli;
	int ret = -1;

	cli = zcache_get_client_by_id(cli_id);
	if (cli == NULL)
		goto out;
	if (cli->allocated)
		goto out;
	cli->allocated = 1;
	ret = 0;
out:
	return ret;
}
/*
 * zcache implementation for tmem host ops
 */

static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
	struct tmem_objnode *objnode = NULL;
	struct zcache_preload *kp;
	int i;

	kp = &__get_cpu_var(zcache_preloads);
	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		objnode = kp->objnodes[i];
		if (objnode != NULL) {
			kp->objnodes[i] = NULL;
			break;
		}
	}
	BUG_ON(objnode == NULL);
	zcache_objnode_count = atomic_inc_return(&zcache_objnode_atomic);
	if (zcache_objnode_count > zcache_objnode_count_max)
		zcache_objnode_count_max = zcache_objnode_count;
	return objnode;
}

static void zcache_objnode_free(struct tmem_objnode *objnode,
				struct tmem_pool *pool)
{
	zcache_objnode_count =
		atomic_dec_return(&zcache_objnode_atomic);
	BUG_ON(zcache_objnode_count < 0);
	kmem_cache_free(zcache_objnode_cache, objnode);
}

static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
	struct tmem_obj *obj = NULL;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	obj = kp->obj;
	BUG_ON(obj == NULL);
	kp->obj = NULL;
	zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
	if (zcache_obj_count > zcache_obj_count_max)
		zcache_obj_count_max = zcache_obj_count;
	return obj;
}

static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
	zcache_obj_count =
		atomic_dec_return(&zcache_obj_atomic);
	BUG_ON(zcache_obj_count < 0);
	kmem_cache_free(zcache_obj_cache, obj);
}

static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
static struct page *zcache_alloc_page(void)
{
	struct page *page = alloc_page(ZCACHE_GFP_MASK);

	if (page != NULL)
		zcache_pageframes_alloced =
			atomic_inc_return(&zcache_pageframes_alloced_atomic);
	return page;
}

#ifdef FRONTSWAP_HAS_UNUSE
static void zcache_unacct_page(void)
{
	zcache_pageframes_freed =
		atomic_inc_return(&zcache_pageframes_freed_atomic);
}
#endif

static void zcache_free_page(struct page *page)
{
	long curr_pageframes;
	static long max_pageframes, min_pageframes;

	if (page == NULL)
		BUG();
	__free_page(page);
	zcache_pageframes_freed =
		atomic_inc_return(&zcache_pageframes_freed_atomic);
	curr_pageframes = zcache_pageframes_alloced -
			atomic_read(&zcache_pageframes_freed_atomic) -
			atomic_read(&zcache_eph_pageframes_atomic) -
			atomic_read(&zcache_pers_pageframes_atomic);
	if (curr_pageframes > max_pageframes)
		max_pageframes = curr_pageframes;
	if (curr_pageframes < min_pageframes)
		min_pageframes = curr_pageframes;
	if (curr_pageframes > 2L || curr_pageframes < -2L) {
		/* pr_info here */
	}
}
/*
 * zcache implementations for PAM page descriptor ops
 */

/* forward reference */
static void zcache_compress(struct page *from,
				void **out_va, unsigned *out_len);

static struct page *zcache_evict_eph_pageframe(void);

static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
					struct tmem_handle *th)
{
	void *pampd = NULL, *cdata = data;
	unsigned clen = size;
	struct page *page = (struct page *)(data), *newpage;

	if (!raw) {
		zcache_compress(page, &cdata, &clen);
		if (clen > zbud_max_buddy_size()) {
			zcache_compress_poor++;
			goto out;
		}
	} else {
		BUG_ON(clen > zbud_max_buddy_size());
	}

	/* look for space via an existing match first */
	pampd = (void *)zbud_match_prep(th, true, cdata, clen);
	if (pampd != NULL)
		goto got_pampd;

	/* no match, now we need to find (or free up) a full page */
	newpage = zcache_alloc_page();
	if (newpage != NULL)
		goto create_in_new_page;
	zcache_failed_getfreepages++;
	/* can't allocate a page, evict an ephemeral page via LRU */
	newpage = zcache_evict_eph_pageframe();
	if (newpage == NULL) {
		zcache_eph_ate_tail_failed++;
		goto out;
	}
	zcache_eph_ate_tail++;

create_in_new_page:
	pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
	BUG_ON(pampd == NULL);
	zcache_eph_pageframes =
		atomic_inc_return(&zcache_eph_pageframes_atomic);
	if (zcache_eph_pageframes > zcache_eph_pageframes_max)
		zcache_eph_pageframes_max = zcache_eph_pageframes;

got_pampd:
	zcache_eph_zbytes =
		atomic_long_add_return(clen, &zcache_eph_zbytes_atomic);
	if (zcache_eph_zbytes > zcache_eph_zbytes_max)
		zcache_eph_zbytes_max = zcache_eph_zbytes;
	zcache_eph_zpages = atomic_inc_return(&zcache_eph_zpages_atomic);
	if (zcache_eph_zpages > zcache_eph_zpages_max)
		zcache_eph_zpages_max = zcache_eph_zpages;
	if (ramster_enabled && raw)
		ramster_count_foreign_pages(true, 1);
out:
	return pampd;
}
static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
					struct tmem_handle *th)
{
	void *pampd = NULL, *cdata = data;
	unsigned clen = size;
	struct page *page = (struct page *)(data), *newpage;
	unsigned long zbud_mean_zsize;
	unsigned long curr_pers_zpages, total_zsize;

	if (data == NULL) {
		BUG_ON(!ramster_enabled);
		goto create_pampd;
	}
	curr_pers_zpages = zcache_pers_zpages;
	/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
	if (!raw)
		zcache_compress(page, &cdata, &clen);
	/* reject if compression is too poor */
	if (clen > zbud_max_zsize) {
		zcache_compress_poor++;
		goto out;
	}
	/* reject if mean compression is too poor */
	if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
		total_zsize = zcache_pers_zbytes;
		if ((long)total_zsize < 0)
			total_zsize = 0;
		zbud_mean_zsize = div_u64(total_zsize,
					curr_pers_zpages);
		if (zbud_mean_zsize > zbud_max_mean_zsize) {
			zcache_mean_compress_poor++;
			goto out;
		}
	}

create_pampd:
	/* look for space via an existing match first */
	pampd = (void *)zbud_match_prep(th, false, cdata, clen);
	if (pampd != NULL)
		goto got_pampd;

	/* no match, now we need to find (or free up) a full page */
	newpage = zcache_alloc_page();
	if (newpage != NULL)
		goto create_in_new_page;
	/*
	 * FIXME do the following only if eph is oversized?
	 * if (zcache_eph_pageframes >
	 * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
	 * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
	 */
	zcache_failed_getfreepages++;
	/* can't allocate a page, evict an ephemeral page via LRU */
	newpage = zcache_evict_eph_pageframe();
	if (newpage == NULL) {
		zcache_pers_ate_eph_failed++;
		goto out;
	}
	zcache_pers_ate_eph++;

create_in_new_page:
	pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
	BUG_ON(pampd == NULL);
	zcache_pers_pageframes =
		atomic_inc_return(&zcache_pers_pageframes_atomic);
	if (zcache_pers_pageframes > zcache_pers_pageframes_max)
		zcache_pers_pageframes_max = zcache_pers_pageframes;

got_pampd:
	zcache_pers_zpages = atomic_inc_return(&zcache_pers_zpages_atomic);
	if (zcache_pers_zpages > zcache_pers_zpages_max)
		zcache_pers_zpages_max = zcache_pers_zpages;
	zcache_pers_zbytes =
		atomic_long_add_return(clen, &zcache_pers_zbytes_atomic);
	if (zcache_pers_zbytes > zcache_pers_zbytes_max)
		zcache_pers_zbytes_max = zcache_pers_zbytes;
	if (ramster_enabled && raw)
		ramster_count_foreign_pages(false, 1);
out:
	return pampd;
}
/*
 * This is called directly from zcache_put_page to pre-allocate space
 * for a zpage.
 */
void *zcache_pampd_create(char *data, unsigned int size, bool raw,
					int eph, struct tmem_handle *th)
{
	void *pampd = NULL;
	struct zcache_preload *kp;
	struct tmem_objnode *objnode;
	struct tmem_obj *obj;
	int i;

	BUG_ON(!irqs_disabled());
	/* pre-allocate per-cpu metadata */
	BUG_ON(zcache_objnode_cache == NULL);
	BUG_ON(zcache_obj_cache == NULL);
	kp = &__get_cpu_var(zcache_preloads);
	for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
		objnode = kp->objnodes[i];
		if (objnode == NULL) {
			objnode = kmem_cache_alloc(zcache_objnode_cache,
							ZCACHE_GFP_MASK);
			if (unlikely(objnode == NULL)) {
				zcache_failed_alloc++;
				goto out;
			}
			kp->objnodes[i] = objnode;
		}
	}
	if (kp->obj == NULL) {
		obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
		kp->obj = obj;
	}
	if (unlikely(kp->obj == NULL)) {
		zcache_failed_alloc++;
		goto out;
	}
	/*
	 * ok, have all the metadata pre-allocated, now do the data
	 * but since how we allocate the data is dependent on ephemeral
	 * or persistent, we split the call here to different sub-functions
	 */
	if (eph)
		pampd = zcache_pampd_eph_create(data, size, raw, th);
	else
		pampd = zcache_pampd_pers_create(data, size, raw, th);
out:
	return pampd;
}

/*
 * This is a pamops called via tmem_put and is necessary to "finish"
 * a pampd creation.
 */
void zcache_pampd_create_finish(void *pampd, bool eph)
{
	zbud_create_finish((struct zbudref *)pampd, eph);
}
/*
 * This is passed as a function parameter to zbud_decompress so that
 * zbud need not be familiar with the details of crypto. It assumes that
 * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
 * kmapped. It must be successful, else there is a logic bug somewhere.
 */
static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
{
	int ret;
	unsigned int outlen = PAGE_SIZE;

	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
				to_va, &outlen);
	BUG_ON(ret);
	BUG_ON(outlen != PAGE_SIZE);
}

/*
 * Decompress from the kernel va to a pageframe
 */
void zcache_decompress_to_page(char *from_va, unsigned int size,
					struct page *to_page)
{
	char *to_va = kmap_atomic(to_page);
	zcache_decompress(from_va, size, to_va);
	kunmap_atomic(to_va);
}
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret;
	bool eph = !is_persistent(pool);

	BUG_ON(preemptible());
	BUG_ON(eph);	/* fix later if shared pools get implemented */
	BUG_ON(pampd_is_remote(pampd));
	if (raw)
		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
						sizep, eph);
	else {
		ret = zbud_decompress((struct page *)(data),
					(struct zbudref *)pampd, false,
					zcache_decompress);
		*sizep = PAGE_SIZE;
	}
	return ret;
}

/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret;
	bool eph = !is_persistent(pool);
	struct page *page = NULL;
	unsigned int zsize, zpages;

	BUG_ON(preemptible());
	BUG_ON(pampd_is_remote(pampd));
	if (raw)
		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
						sizep, eph);
	else {
		ret = zbud_decompress((struct page *)(data),
					(struct zbudref *)pampd, eph,
					zcache_decompress);
		*sizep = PAGE_SIZE;
	}
	page = zbud_free_and_delist((struct zbudref *)pampd, eph,
					&zsize, &zpages);
	if (eph) {
		if (page)
			zcache_eph_pageframes =
			    atomic_dec_return(&zcache_eph_pageframes_atomic);
		zcache_eph_zpages =
		    atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
		zcache_eph_zbytes =
		    atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
	} else {
		if (page)
			zcache_pers_pageframes =
			    atomic_dec_return(&zcache_pers_pageframes_atomic);
		zcache_pers_zpages =
		    atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
		zcache_pers_zbytes =
		    atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
	}
	if (!is_local_client(pool->client))
		ramster_count_foreign_pages(eph, -1);
	if (page)
		zcache_free_page(page);
	return ret;
}
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
			      struct tmem_oid *oid, uint32_t index, bool acct)
{
	struct page *page = NULL;
	unsigned int zsize, zpages;

	BUG_ON(preemptible());
	if (pampd_is_remote(pampd)) {
		BUG_ON(!ramster_enabled);
		pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
		if (pampd == NULL)
			return;
	}
	if (is_ephemeral(pool)) {
		page = zbud_free_and_delist((struct zbudref *)pampd,
						true, &zsize, &zpages);
		if (page)
			zcache_eph_pageframes =
			    atomic_dec_return(&zcache_eph_pageframes_atomic);
		zcache_eph_zpages =
		    atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
		zcache_eph_zbytes =
		    atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
		/* FIXME CONFIG_RAMSTER... check acct parameter? */
	} else {
		page = zbud_free_and_delist((struct zbudref *)pampd,
						false, &zsize, &zpages);
		if (page)
			zcache_pers_pageframes =
			    atomic_dec_return(&zcache_pers_pageframes_atomic);
		zcache_pers_zpages =
		    atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
		zcache_pers_zbytes =
		    atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
	}
	if (!is_local_client(pool->client))
		ramster_count_foreign_pages(is_ephemeral(pool), -1);
	if (page)
		zcache_free_page(page);
}

static struct tmem_pamops zcache_pamops = {
	.create_finish = zcache_pampd_create_finish,
	.get_data = zcache_pampd_get_data,
	.get_data_and_free = zcache_pampd_get_data_and_free,
	.free = zcache_pampd_free,
};
/*
 * zcache compression/decompression and related per-cpu stuff
 */

static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
#define ZCACHE_DSTMEM_ORDER 1

static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
	int ret;
	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
	char *from_va;

	BUG_ON(!irqs_disabled());
	/* no buffer or no compressor so can't compress */
	BUG_ON(dmem == NULL);
	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
	from_va = kmap_atomic(from);
	mb();
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
				out_len);
	BUG_ON(ret);
	*out_va = dmem;
	kunmap_atomic(from_va);
}
static int zcache_comp_cpu_up(int cpu)
{
	struct crypto_comp *tfm;

	tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
	if (IS_ERR(tfm))
		return NOTIFY_BAD;
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
	return NOTIFY_OK;
}

static void zcache_comp_cpu_down(int cpu)
{
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
	crypto_free_comp(tfm);
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
}

static int zcache_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	int ret, i, cpu = (long)pcpu;
	struct zcache_preload *kp;

	switch (action) {
	case CPU_UP_PREPARE:
		ret = zcache_comp_cpu_up(cpu);
		if (ret != NOTIFY_OK) {
			pr_err("%s: can't allocate compressor xform\n",
				namestr);
			return ret;
		}
		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
		if (ramster_enabled)
			ramster_cpu_up(cpu);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		zcache_comp_cpu_down(cpu);
		free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
			ZCACHE_DSTMEM_ORDER);
		per_cpu(zcache_dstmem, cpu) = NULL;
		kp = &per_cpu(zcache_preloads, cpu);
		for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
			if (kp->objnodes[i])
				kmem_cache_free(zcache_objnode_cache,
						kp->objnodes[i]);
			kp->objnodes[i] = NULL;
		}
		if (kp->obj) {
			kmem_cache_free(zcache_obj_cache, kp->obj);
			kp->obj = NULL;
		}
		if (ramster_enabled)
			ramster_cpu_down(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block zcache_cpu_notifier_block = {
	.notifier_call = zcache_cpu_notifier
};
/*
 * The following code interacts with the zbud eviction and zbud
 * zombify code to access LRU pages
 */

static struct page *zcache_evict_eph_pageframe(void)
{
	struct page *page;
	unsigned int zsize = 0, zpages = 0;

	page = zbud_evict_pageframe_lru(&zsize, &zpages);
	if (page == NULL)
		goto out;
	zcache_eph_zbytes = atomic_long_sub_return(zsize,
					&zcache_eph_zbytes_atomic);
	zcache_eph_zpages = atomic_sub_return(zpages,
					&zcache_eph_zpages_atomic);
	zcache_evicted_eph_zpages++;
	zcache_eph_pageframes =
		atomic_dec_return(&zcache_eph_pageframes_atomic);
	zcache_evicted_eph_pageframes++;
out:
	return page;
}
#ifdef FRONTSWAP_HAS_UNUSE
static void unswiz(struct tmem_oid oid, u32 index,
			unsigned *type, pgoff_t *offset);

/*
 * Choose an LRU persistent pageframe and attempt to "unuse" it by
 * calling frontswap_unuse on both zpages.
 *
 * This is work-in-progress.
 */
static int zcache_frontswap_unuse(void)
{
	struct tmem_handle th[2];
	int ret = -ENOMEM;
	int nzbuds, unuse_ret;
	unsigned type;
	struct page *newpage1 = NULL, *newpage2 = NULL;
	struct page *evictpage1 = NULL, *evictpage2 = NULL;
	pgoff_t offset;

	newpage1 = alloc_page(ZCACHE_GFP_MASK);
	newpage2 = alloc_page(ZCACHE_GFP_MASK);
	if (newpage1 == NULL)
		evictpage1 = zcache_evict_eph_pageframe();
	if (newpage2 == NULL)
		evictpage2 = zcache_evict_eph_pageframe();
	if (evictpage1 == NULL || evictpage2 == NULL)
		goto free_and_out;
	/* ok, we have two pages pre-allocated */
	nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
	if (nzbuds == 0) {
		ret = -ENOENT;
		goto free_and_out;
	}
	unswiz(th[0].oid, th[0].index, &type, &offset);
	unuse_ret = frontswap_unuse(type, offset,
				newpage1 != NULL ? newpage1 : evictpage1,
				ZCACHE_GFP_MASK);
	if (unuse_ret != 0)
		goto free_and_out;
	else if (evictpage1 != NULL)
		zcache_unacct_page();
	newpage1 = NULL;
	evictpage1 = NULL;
	if (nzbuds == 2) {
		unswiz(th[1].oid, th[1].index, &type, &offset);
		unuse_ret = frontswap_unuse(type, offset,
				newpage2 != NULL ? newpage2 : evictpage2,
				ZCACHE_GFP_MASK);
		if (unuse_ret != 0) {
			goto free_and_out;
		} else if (evictpage2 != NULL) {
			zcache_unacct_page();
		}
		newpage2 = NULL;
		evictpage2 = NULL;
	}
	ret = 0;

free_and_out:
	if (newpage1 != NULL)
		__free_page(newpage1);
	if (newpage2 != NULL)
		__free_page(newpage2);
	if (evictpage1 != NULL)
		zcache_free_page(evictpage1);
	if (evictpage2 != NULL)
		zcache_free_page(evictpage2);
	return ret;
}
#endif
/*
 * When zcache is disabled ("frozen"), pools can be created and destroyed,
 * but all puts (and thus all other operations that require memory allocation)
 * must fail.  If zcache is unfrozen, accepts puts, then frozen again,
 * data consistency requires all puts while frozen to be converted into
 * flushes.
 */
static bool zcache_freeze;

/*
 * This zcache shrinker interface reduces the number of ephemeral pageframes
 * used by zcache to approximately the same as the total number of LRU_FILE
 * pageframes in use.
 */
static int shrink_zcache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	static bool in_progress;
	int ret = -1;
	int nr = sc->nr_to_scan;
	int nr_evict = 0;
	int nr_unuse = 0;
	struct page *page;
#ifdef FRONTSWAP_HAS_UNUSE
	int unuse_ret;
#endif

	if (nr <= 0)
		goto skip_evict;

	/* don't allow more than one eviction thread at a time */
	if (in_progress)
		goto skip_evict;

	in_progress = true;

	/* we are going to ignore nr, and target a different value */
	zcache_last_active_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	zcache_last_inactive_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	nr_evict = zcache_eph_pageframes - zcache_last_active_file_pageframes +
		zcache_last_inactive_file_pageframes;
	while (nr_evict-- > 0) {
		page = zcache_evict_eph_pageframe();
		if (page == NULL)
			break;
		zcache_free_page(page);
	}

	zcache_last_active_anon_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
	zcache_last_inactive_anon_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
	nr_unuse = zcache_pers_pageframes - zcache_last_active_anon_pageframes +
		zcache_last_inactive_anon_pageframes;
#ifdef FRONTSWAP_HAS_UNUSE
	/* rate limit for testing */
	while (nr_unuse-- > 0) {
		unuse_ret = zcache_frontswap_unuse();
		if (unuse_ret == -ENOMEM)
			break;
	}
#endif
	in_progress = false;

skip_evict:
	/* resample: has changed, but maybe not all the way yet */
	zcache_last_active_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	zcache_last_inactive_file_pageframes =
		global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
		zcache_last_inactive_file_pageframes;
	if (ret < 0)
		ret = 0;
	return ret;
}

static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
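/*
 * Shrinker protocol sketch (how the VM drives the function above): the VM
 * first calls ->shrink with sc->nr_to_scan == 0 purely to ask how much is
 * freeable, then calls again with a nonzero count to actually reclaim.
 * As the comments above note, zcache ignores the requested count and
 * instead drives zcache_eph_pageframes toward the file-LRU size,
 * returning the remaining surplus as its freeable count.
 */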
/*
 * zcache shims between cleancache/frontswap ops and tmem
 */

/* FIXME rename these core routines to zcache_tmemput etc? */
int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
			uint32_t index, void *page,
			unsigned int size, bool raw, int ephemeral)
{
	struct tmem_pool *pool;
	struct tmem_handle th;
	int ret = -1;
	void *pampd = NULL;

	BUG_ON(!irqs_disabled());
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (unlikely(pool == NULL))
		goto out;
	if (!zcache_freeze) {
		ret = 0;
		th.client_id = cli_id;
		th.pool_id = pool_id;
		th.oid = *oidp;
		th.index = index;
		pampd = zcache_pampd_create((char *)page, size, raw,
						ephemeral, &th);
		if (pampd == NULL) {
			ret = -ENOMEM;
			if (ephemeral)
				zcache_failed_eph_puts++;
			else
				zcache_failed_pers_puts++;
			zcache_put_pool(pool);
			goto out;
		}
		if (ramster_enabled)
			ramster_do_preload_flnode(pool);
		ret = tmem_put(pool, oidp, index, 0, pampd);
		if (ret < 0)
			BUG();
		zcache_put_pool(pool);
	} else {
		zcache_put_to_flush++;
		if (ramster_enabled)
			ramster_do_preload_flnode(pool);
		if (atomic_read(&pool->obj_count) > 0)
			/* the put fails whether the flush succeeds or not */
			(void)tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
out:
	return ret;
}
int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
			uint32_t index, void *page,
			size_t *sizep, bool raw, int get_and_free)
{
	struct tmem_pool *pool;
	int ret = -1;
	bool eph;

	if (!raw) {
		BUG_ON(irqs_disabled());
		BUG_ON(in_softirq());
	}
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	eph = is_ephemeral(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_get(pool, oidp, index, (char *)(page),
					sizep, raw, get_and_free);
		zcache_put_pool(pool);
	}
	WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
		"zcache_get fails on persistent pool, "
		"bad things are very likely to happen soon\n");
#ifdef RAMSTER_TESTING
	if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
		pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
#endif
	return ret;
}
int zcache_flush_page(int cli_id, int pool_id,
			struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	zcache_flush_total++;
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (ramster_enabled)
		ramster_do_preload_flnode(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		zcache_flush_found++;
	local_irq_restore(flags);
	return ret;
}

int zcache_flush_object(int cli_id, int pool_id,
			struct tmem_oid *oidp)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	zcache_flobj_total++;
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (ramster_enabled)
		ramster_do_preload_flnode(pool);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_object(pool, oidp);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		zcache_flobj_found++;
	local_irq_restore(flags);
	return ret;
}
static int zcache_client_destroy_pool(int cli_id, int pool_id)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;
	int ret = -1;

	if (pool_id < 0)
		goto out;
	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool == NULL)
		goto out;
	cli->tmem_pools[pool_id] = NULL;
	/* wait for pool activity on other cpus to quiesce */
	while (atomic_read(&pool->refcount) != 0)
		;
	atomic_dec(&cli->refcount);
	local_bh_disable();
	ret = tmem_destroy_pool(pool);
	local_bh_enable();
	kfree(pool);
	if (cli_id == LOCAL_CLIENT)
		pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
	else
		pr_info("%s: destroyed pool id=%d, client=%d\n",
			namestr, pool_id, cli_id);
out:
	return ret;
}
int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
	int poolid = -1;
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
	if (pool == NULL) {
		pr_info("%s: pool creation failed: out of memory\n", namestr);
		goto out;
	}

	for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
		if (cli->tmem_pools[poolid] == NULL)
			break;
	if (poolid >= MAX_POOLS_PER_CLIENT) {
		pr_info("%s: pool creation failed: max exceeded\n", namestr);
		kfree(pool);
		poolid = -1;
		goto out;
	}
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = poolid;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[poolid] = pool;
	if (cli_id == LOCAL_CLIENT)
		pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
			flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
			poolid);
	else
		pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
			flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
			poolid, cli_id);
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return poolid;
}

static int zcache_local_new_pool(uint32_t flags)
{
	return zcache_new_pool(LOCAL_CLIENT, flags);
}
int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
{
	struct tmem_pool *pool;
	struct zcache_client *cli;
	uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
	int ret = -1;

	BUG_ON(!ramster_enabled);
	if (cli_id == LOCAL_CLIENT)
		goto out;
	if (pool_id >= MAX_POOLS_PER_CLIENT)
		goto out;
	if (cli_id >= MAX_CLIENTS)
		goto out;

	cli = &zcache_clients[cli_id];
	if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
		pr_err("zcache_autocreate_pool: pool type disabled\n");
		goto out;
	}
	if (!cli->allocated) {
		if (zcache_new_client(cli_id)) {
			pr_err("zcache_autocreate_pool: can't create client\n");
			goto out;
		}
		cli = &zcache_clients[cli_id];
	}
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool != NULL) {
		if (pool->persistent && eph) {
			pr_err("zcache_autocreate_pool: type mismatch\n");
			goto out;
		}
		ret = 0;
		goto out;
	}
	pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
	if (pool == NULL) {
		pr_info("%s: pool creation failed: out of memory\n", namestr);
		goto out;
	}
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = pool_id;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[pool_id] = pool;
	pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
		namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
		pool_id, cli_id);
	ret = 0;
out:
	atomic_dec(&cli->refcount);
	return ret;
}
/*
 * Two kernel functionalities currently can be layered on top of tmem.
 * These are "cleancache" which is used as a second-chance cache for clean
 * page cache pages; and "frontswap" which is used for swap pages
 * to avoid writes to disk.  A generic "shim" is provided here for each
 * to translate in-kernel semantics to zcache semantics.
 */

static void zcache_cleancache_put_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
		zcache_eph_nonactive_puts_ignored++;
		return;
	}
	if (likely(ind == index))
		(void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
					page, PAGE_SIZE, false, 1);
}
static int zcache_cleancache_get_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	size_t size;
	int ret = -1;

	if (likely(ind == index)) {
		ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
					page, &size, false, 0);
		BUG_ON(ret >= 0 && size != PAGE_SIZE);
		if (ret == 0)
			SetPageWasActive(page);
	}
	return ret;
}

static void zcache_cleancache_flush_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (likely(ind == index))
		(void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}

static void zcache_cleancache_flush_inode(int pool_id,
					struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	(void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
}

static void zcache_cleancache_flush_fs(int pool_id)
{
	if (pool_id >= 0)
		(void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
}

static int zcache_cleancache_init_fs(size_t pagesize)
{
	BUG_ON(sizeof(struct cleancache_filekey) !=
		sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_local_new_pool(0);
}

static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	/* shared pools are unsupported and map to private */
	BUG_ON(sizeof(struct cleancache_filekey) !=
		sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_local_new_pool(0);
}
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.invalidate_page = zcache_cleancache_flush_page,
	.invalidate_inode = zcache_cleancache_flush_inode,
	.invalidate_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};

struct cleancache_ops zcache_cleancache_register_ops(void)
{
	struct cleancache_ops old_ops =
		cleancache_register_ops(&zcache_cleancache_ops);

	return old_ops;
}
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid __read_mostly = -1;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
 * frontswap_get_page(), but has side-effects. Hence using 8.
 */
#define SWIZ_BITS		8
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
#ifdef FRONTSWAP_HAS_UNUSE
static void unswiz(struct tmem_oid oid, u32 index,
			unsigned *type, pgoff_t *offset)
{
	*type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
	*offset = (pgoff_t)((index << SWIZ_BITS) |
			(oid.oid[0] & SWIZ_MASK));
}
#endif
static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	int ret = -1;
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
		zcache_pers_nonactive_puts_ignored++;
		goto out;
	}
	if (likely(ind64 == ind)) {
		local_irq_save(flags);
		ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind),
					page, PAGE_SIZE, false, 0);
		local_irq_restore(flags);
	}
out:
	return ret;
}

/* returns 0 if the page was successfully gotten from frontswap, -1 if
 * was not present (should never happen!) */
static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	size_t size;
	int ret = -1, get_and_free;

	if (frontswap_has_exclusive_gets)
		get_and_free = 1;
	else
		get_and_free = -1;
	BUG_ON(!PageLocked(page));
	if (likely(ind64 == ind)) {
		ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind),
					page, &size, false, get_and_free);
		BUG_ON(ret >= 0 && size != PAGE_SIZE);
	}
	return ret;
}
/* flush a single page from frontswap */
static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);

	if (likely(ind64 == ind))
		(void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void zcache_frontswap_flush_area(unsigned type)
{
	struct tmem_oid oid;
	int ind;

	for (ind = SWIZ_MASK; ind >= 0; ind--) {
		oid = oswiz(type, ind);
		(void)zcache_flush_object(LOCAL_CLIENT,
					zcache_frontswap_poolid, &oid);
	}
}

static void zcache_frontswap_init(unsigned ignored)
{
	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (zcache_frontswap_poolid < 0)
		zcache_frontswap_poolid =
			zcache_local_new_pool(TMEM_POOL_PERSIST);
}
static struct frontswap_ops zcache_frontswap_ops = {
	.store = zcache_frontswap_put_page,
	.load = zcache_frontswap_get_page,
	.invalidate_page = zcache_frontswap_flush_page,
	.invalidate_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};

struct frontswap_ops zcache_frontswap_register_ops(void)
{
	struct frontswap_ops old_ops =
		frontswap_register_ops(&zcache_frontswap_ops);

	return old_ops;
}
/*
 * zcache initialization
 * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
 * OR NOTHING HAPPENS!
 */

static int __init enable_zcache(char *s)
{
	zcache_enabled = 1;
	return 1;
}
__setup("zcache", enable_zcache);

static int __init enable_ramster(char *s)
{
	zcache_enabled = 1;
#ifdef CONFIG_RAMSTER
	ramster_enabled = 1;
#endif
	return 1;
}
__setup("ramster", enable_ramster);

/* allow independent dynamic disabling of cleancache and frontswap */

static int __init no_cleancache(char *s)
{
	disable_cleancache = 1;
	return 1;
}
__setup("nocleancache", no_cleancache);

static int __init no_frontswap(char *s)
{
	disable_frontswap = 1;
	return 1;
}
__setup("nofrontswap", no_frontswap);

static int __init no_frontswap_exclusive_gets(char *s)
{
	frontswap_has_exclusive_gets = false;
	return 1;
}
__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);

static int __init no_frontswap_ignore_nonactive(char *s)
{
	disable_frontswap_ignore_nonactive = 1;
	return 1;
}
__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);

static int __init no_cleancache_ignore_nonactive(char *s)
{
	disable_cleancache_ignore_nonactive = 1;
	return 1;
}
__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);

static int __init enable_zcache_compressor(char *s)
{
	strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
	zcache_enabled = 1;
	return 1;
}
__setup("zcache=", enable_zcache_compressor);
static int __init zcache_comp_init(void)
{
	int ret = 0;

	/* check crypto algorithm */
	if (*zcache_comp_name != '\0') {
		ret = crypto_has_comp(zcache_comp_name, 0, 0);
		if (!ret)
			pr_info("zcache: %s not supported\n",
				zcache_comp_name);
	}
	if (!ret)
		strcpy(zcache_comp_name, "lzo");
	ret = crypto_has_comp(zcache_comp_name, 0, 0);
	if (!ret) {
		ret = 1;
		goto out;
	}
	pr_info("zcache: using %s compressor\n", zcache_comp_name);

	/* alloc percpu transforms */
	ret = 0;
	zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zcache_comp_pcpu_tfms)
		ret = 1;
out:
	return ret;
}
static int __init zcache_init(void)
{
	int ret = 0;

	if (ramster_enabled) {
		namestr = "ramster";
		ramster_register_pamops(&zcache_pamops);
	}
#ifdef CONFIG_DEBUG_FS
	zcache_debugfs_init();
#endif
	if (zcache_enabled) {
		unsigned int cpu;

		tmem_register_hostops(&zcache_hostops);
		tmem_register_pamops(&zcache_pamops);
		ret = register_cpu_notifier(&zcache_cpu_notifier_block);
		if (ret) {
			pr_err("%s: can't register cpu notifier\n", namestr);
			goto out;
		}
		ret = zcache_comp_init();
		if (ret) {
			pr_err("%s: compressor initialization failed\n",
				namestr);
			goto out;
		}
		for_each_online_cpu(cpu) {
			void *pcpu = (void *)(long)cpu;
			zcache_cpu_notifier(&zcache_cpu_notifier_block,
				CPU_UP_PREPARE, pcpu);
		}
	}
	zcache_objnode_cache = kmem_cache_create("zcache_objnode",
				sizeof(struct tmem_objnode), 0, 0, NULL);
	zcache_obj_cache = kmem_cache_create("zcache_obj",
				sizeof(struct tmem_obj), 0, 0, NULL);
	ret = zcache_new_client(LOCAL_CLIENT);
	if (ret) {
		pr_err("%s: can't create client\n", namestr);
		goto out;
	}
	zbud_init();
	if (zcache_enabled && !disable_cleancache) {
		struct cleancache_ops old_ops;

		register_shrinker(&zcache_shrinker);
		old_ops = zcache_cleancache_register_ops();
		pr_info("%s: cleancache enabled using kernel transcendent "
			"memory and compression buddies\n", namestr);
		pr_info("%s: cleancache: ignorenonactive = %d\n",
			namestr, !disable_cleancache_ignore_nonactive);
		if (old_ops.init_fs != NULL)
			pr_warn("%s: cleancache_ops overridden\n", namestr);
	}
	if (zcache_enabled && !disable_frontswap) {
		struct frontswap_ops old_ops;

		old_ops = zcache_frontswap_register_ops();
		if (frontswap_has_exclusive_gets)
			frontswap_tmem_exclusive_gets(true);
		pr_info("%s: frontswap enabled using kernel transcendent "
			"memory and compression buddies\n", namestr);
		pr_info("%s: frontswap: excl gets = %d active only = %d\n",
			namestr, frontswap_has_exclusive_gets,
			!disable_frontswap_ignore_nonactive);
		if (old_ops.init != NULL)
			pr_warn("%s: frontswap_ops overridden\n", namestr);
	}
	if (ramster_enabled)
		ramster_init(!disable_cleancache, !disable_frontswap,
				frontswap_has_exclusive_gets);
out:
	return ret;
}

late_initcall(zcache_init);