/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
module_param_named(enabled, zswap_enabled, bool, 0644);

/* Compressor to be used by zswap (fixed at boot for now) */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
module_param_named(compressor, zswap_compressor, charp, 0444);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* Compressed storage to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
module_param_named(zpool, zswap_zpool_type, charp, 0444);
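
/*
 * For reference, module_param_named() above surfaces these knobs under
 * /sys/module/zswap/parameters/.  A typical way to enable zswap at boot
 * and tune it at runtime:
 *
 *	# kernel command line
 *	zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud
 *
 *	# runtime (writable parameters only, per the 0644 modes above)
 *	echo 1  > /sys/module/zswap/parameters/enabled
 *	echo 25 > /sys/module/zswap/parameters/max_pool_percent
 */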

/* zpool is shared by all of zswap backend */
static struct zpool *zswap_pool;

/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct rcu_head rcu_head;
	struct notifier_block notifier;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
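
/*
 * A quick worked example of the check above: on a machine with 4 GiB of
 * RAM (totalram_pages = 1048576 with 4 KiB pages) and the default
 * max_pool_percent of 20, the budget is 1048576 * 20 / 100 = 209715
 * pages (integer division), so zswap_is_full() returns true once
 * zswap_pool_total_size exceeds 209715 * 4096 bytes, roughly 819 MiB
 * of compressed storage.
 */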

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
* remove it from the tree and free it, if nobody references the entry
*/
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry = NULL;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
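
/*
 * The canonical lookup pattern built from the helpers above, as the load
 * and writeback paths below use it (an illustrative sketch, not extra
 * code compiled here):
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);
 *	spin_unlock(&tree->lock);
 *	if (!entry)
 *		return -1;	// entry was written back or invalidated
 *	// ... use entry->handle / entry->length without the lock ...
 *	spin_lock(&tree->lock);
 *	zswap_entry_put(tree, entry);	// may free the entry
 *	spin_unlock(&tree->lock);
 */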

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
{
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
				     unsigned long action, void *pcpu)
{
	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
}

static struct notifier_block zswap_dstmem_notifier = {
	.notifier_call =	zswap_cpu_dstmem_notifier,
};

static int __init zswap_cpu_dstmem_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_dstmem_destroy(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
}

static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
				     unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;

	switch (action) {
	case CPU_UP_PREPARE:
		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
			break;
		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
		if (IS_ERR_OR_NULL(tfm)) {
			pr_err("could not alloc crypto comp %s : %ld\n",
			       pool->tfm_name, PTR_ERR(tfm));
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(pool->tfm, cpu) = tfm;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(pool->tfm, cpu);
		if (!IS_ERR_OR_NULL(tfm))
			crypto_free_comp(tfm);
		*per_cpu_ptr(pool->tfm, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_comp_notifier(struct notifier_block *nb,
				   unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);

	return __zswap_cpu_comp_notifier(pool, action, cpu);
}

static int zswap_cpu_comp_init(struct zswap_pool *pool)
{
	unsigned long cpu;

	memset(&pool->notifier, 0, sizeof(pool->notifier));
	pool->notifier.notifier_call = zswap_cpu_comp_notifier;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ON(!pool);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!pool || !zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	if (!WARN_ON(!last) && !zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name)))
			continue;
		if (strncmp(zpool_get_type(pool->zpool), type,
			    sizeof(zswap_zpool_type)))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("pool alloc failed\n");
		return NULL;
	}

	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	if (zswap_cpu_comp_init(pool))
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		strncpy(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT,
			sizeof(zswap_compressor));
	}
	if (!zpool_has_pool(zswap_zpool_type)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		strncpy(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT,
			sizeof(zswap_zpool_type));
	}

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	zswap_cpu_comp_destroy(pool);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct rcu_head *head)
{
	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);
	call_rcu(&pool->rcu_head, __zswap_pool_release);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}
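
/*
 * Pool reference discipline, roughly as the store path below applies it
 * (an illustrative sketch, not extra code compiled here):
 *
 *	pool = zswap_pool_current_get();	// takes a kref, may fail
 *	if (!pool)
 *		return -EINVAL;
 *	// ... compress into pool->zpool using pool->tfm ...
 *	zswap_pool_put(pool);	// the last put triggers __zswap_pool_empty,
 *				// which unlists the pool and destroys it
 *				// after an RCU grace period
 */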

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 * so we search the tree and free the entry if we find it
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * if we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently; it is safe and okay to not free the
	 * entry; even if we do free the entry in the following put, it
	 * is also okay to return !0
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
*/
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
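
/*
 * For reference, with CONFIG_DEBUG_FS enabled the counters created above
 * appear under /sys/kernel/debug/zswap/ (assuming debugfs is mounted at
 * the usual /sys/kernel/debug), e.g.:
 *
 *	cat /sys/kernel/debug/zswap/pool_total_size
 *	cat /sys/kernel/debug/zswap/stored_pages
 */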

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	if (zswap_cpu_dstmem_init()) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	pool = __zswap_pool_create_fallback();
	if (!pool) {
		pr_err("pool creation failed\n");
		goto pool_fail;
	}
	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
		zpool_get_type(pool->zpool));

	list_add(&pool->list, &zswap_pools);

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

pool_fail:
	zswap_cpu_dstmem_destroy();
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");