/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to zspage
 *	page->index: offset of the first object starting in this page.
 *		For the first page, this is always 0, so we use this field
 *		to store handle for huge object.
 *	page->next: links together all component pages of a zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS	36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS	BITS_PER_LONG
#endif
#endif

#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * Memory allocated for a handle keeps the object position by encoding
 * <page, obj_idx>, and the encoded value has room in its least
 * significant bit (see obj_to_location()).
 * We use that bit to synchronize object access between the user
 * and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * Head in allocated object should have OBJ_ALLOCATED_TAG
 * to identify whether the object was allocated or not.
 * It's okay to add the status bit in the least significant bit because
 * the header keeps a handle, which is a 4-byte-aligned address, so we
 * have room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
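/*
 * Editor's note, illustrative sketch only (the configuration below is an
 * assumption, not taken from this file): on a 64-bit system with 4K pages
 * and MAX_PHYSMEM_BITS == 46, _PFN_BITS is 34 and OBJ_INDEX_BITS is
 * 64 - 34 - 1 = 29.  location_to_obj() below then packs a location as
 *
 *	obj = ((pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK))
 *							<< OBJ_TAG_BITS;
 *
 * and obj_to_location() recovers <pfn, obj_idx> by reversing the shifts.
 */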
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
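/*
 * Editor's note, worked example: with PAGE_SIZE == 4096 and CLASS_BITS == 8,
 * ZS_SIZE_CLASS_DELTA is 16 bytes.  With ZS_MIN_ALLOC_SIZE == 32 and
 * ZS_MAX_ALLOC_SIZE == 4096 the number of classes is
 *
 *	(4096 - 32) / 16 + 1 = 255
 *
 * which is the "255 size classes" mentioned above; init_zs_size_classes()
 * below performs the same computation in code.
 */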
/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
};

#ifdef CONFIG_ZSMALLOC_STAT
#define NR_ZS_STAT_TYPE	(CLASS_ALMOST_EMPTY + 1)
#else
#define NR_ZS_STAT_TYPE	(OBJ_USED + 1)
#endif

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

/*
 * number of size_classes
 */
static int zs_size_classes;
/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= 3 * N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > 3 * N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
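/*
 * Editor's note, worked example: with f == 4 and a zspage that can hold
 * N == 8 objects, the zspage is ZS_ALMOST_EMPTY while it holds 1..6 live
 * objects (n <= 3 * 8 / 4 == 6), ZS_ALMOST_FULL at n == 7, ZS_FULL at
 * n == 8 and ZS_EMPTY at n == 0 (see get_fullness_group() below).
 */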
struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[2];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	unsigned int index;

	struct zs_size_stat stats;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
	bool huge;
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Position of next free chunk (encodes <PFN, obj_idx>)
		 * It's valid for non-allocated object
		 */
		void *next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};
struct zs_pool {
	const char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;
	/*
	 * To signify that register_shrinker() was successful
	 * and unregister_shrinker() will not Oops.
	 */
	bool shrinker_enabled;
#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};
/*
 * A zspage's class index and fullness group are encoded in the
 * zspage->class and zspage->fullness bit fields below.
 */
#define FULLNESS_BITS	2
#define CLASS_BITS	8

struct zspage {
	unsigned int fullness:FULLNESS_BITS;
	unsigned int class:CLASS_BITS;
	unsigned int inuse;
	void *freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
};

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};
static int create_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	if (!pool->handle_cachep)
		return 1;

	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					0, 0, NULL);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return 1;
	}

	return 0;
}

static void destroy_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
	kmem_cache_destroy(pool->zspage_cachep);
}

static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
			gfp & ~__GFP_HIGHMEM);
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	return kmem_cache_alloc(pool->zspage_cachep, flags & ~__GFP_HIGHMEM);
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}
static void record_obj(unsigned long handle, unsigned long obj)
{
	/*
	 * The lsb of @obj represents the handle lock while the other bits
	 * represent the object value the handle is pointing to, so
	 * updating must not do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
}
/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp,
			     const struct zpool_ops *zpool_ops,
			     struct zpool *zpool)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.owner =	THIS_MODULE,
	.type =		"zsmalloc",
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}

static inline void set_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse = val;
}

static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static inline int get_first_obj_offset(struct page *page)
{
	if (is_first_page(page))
		return 0;

	return page->index;
}

static inline void set_first_obj_offset(struct page *page, int offset)
{
	if (is_first_page(page))
		return;

	page->index = offset;
}

static inline unsigned long get_freeobj(struct zspage *zspage)
{
	return (unsigned long)zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned long obj)
{
	zspage->freeobj = (void *)obj;
}

static void get_zspage_mapping(struct zspage *zspage,
				unsigned int *class_idx,
				enum fullness_group *fullness)
{
	*fullness = zspage->fullness;
	*class_idx = zspage->class;
}

static void set_zspage_mapping(struct zspage *zspage,
				unsigned int class_idx,
				enum fullness_group fullness)
{
	zspage->class = class_idx;
	zspage->fullness = fullness;
}
/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min(zs_size_classes - 1, idx);
}
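/*
 * Editor's note, worked example: on a 4K-page system a request that ends
 * up with size == 112 gives idx = DIV_ROUND_UP(112 - 32, 16) = 5, i.e. the
 * class whose chunk size is 32 + 5 * 16 = 112 bytes.  Sizes that fall
 * between two deltas round up to the next class.
 */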
static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	if (type < NR_ZS_STAT_TYPE)
		class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	if (type < NR_ZS_STAT_TYPE)
		class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	if (type < NR_ZS_STAT_TYPE)
		return class->stats.objs[type];
	return 0;
}
#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage, freeable);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages, "", total_freeable);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open		= zs_stats_size_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	struct dentry *entry;

	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		debugfs_remove_recursive(pool->stat_dentry);
		pool->stat_dentry = NULL;
	}
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif
/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given zspage.
 */
static enum fullness_group get_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int inuse, objs_per_zspage;
	enum fullness_group fg;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == objs_per_zspage)
		fg = ZS_FULL;
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	struct zspage *head;

	if (fullness >= ZS_EMPTY)
		return;

	head = list_first_entry_or_null(&class->fullness_list[fullness],
					struct zspage, list);

	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);

	/*
	 * We want to see more ZS_FULL pages and less almost empty/full.
	 * Put pages with higher ->inuse first.
	 */
	if (head) {
		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
			list_add(&zspage->list, &head->list);
			return;
		}
	}
	list_add(&zspage->list, &class->fullness_list[fullness]);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	if (fullness >= ZS_EMPTY)
		return;

	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));

	list_del_init(&zspage->list);
	zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int class_idx;
	enum fullness_group currfg, newfg;

	get_zspage_mapping(zspage, &class_idx, &currfg);
	newfg = get_fullness_group(class, zspage);
	if (newfg == currfg)
		goto out;

	remove_zspage(class, zspage, currfg);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class_idx, newfg);

out:
	return newfg;
}
/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
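/*
 * Editor's note, worked example: for class_size == 3/8 * PAGE_SIZE
 * (1536 bytes with 4K pages) the loop above sees
 *
 *	1 page:  waste = 4096 % 1536 = 1024  ->  75% used
 *	2 pages: waste = 8192 % 1536 =  512  ->  93% used
 *	3 pages: waste = 0                   -> 100% used
 *
 * so three pages are linked per zspage, matching the example in the
 * comment above.
 */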
static struct zspage *get_zspage(struct page *page)
{
	return (struct zspage *)page->private;
}

static struct page *get_next_page(struct page *page)
{
	/* PG_private_2 marks the last component page (see header comment) */
	if (PagePrivate2(page))
		return NULL;

	return page->next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * We use the least bit of handle for tagging.
 */
static void *location_to_obj(struct page *page, unsigned long obj_idx)
{
	unsigned long obj;

	if (!page) {
		VM_BUG_ON(obj_idx);
		return NULL;
	}

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= ((obj_idx) & OBJ_INDEX_MASK);
	obj <<= OBJ_TAG_BITS;

	return (void *)obj;
}

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * location_to_obj().
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned long *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct size_class *class, struct page *page,
					void *obj)
{
	if (class->huge) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		return page->index;
	} else
		return *(unsigned long *)obj;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off;

	off = get_first_obj_offset(page);

	return off + obj_idx * class_size;
}

static inline int trypin_tag(unsigned long handle)
{
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void pin_tag(unsigned long handle)
{
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void unpin_tag(unsigned long handle)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
}

static void free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page, *next;

	VM_BUG_ON(get_zspage_inuse(zspage));

	next = page = zspage->first_page;
	do {
		next = page->next;
		reset_page(page);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned long off = 0;
	struct page *page = zspage->first_page;

	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = location_to_obj(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present).
		 */
		next_page = get_next_page(page);
		link->next = location_to_obj(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage,
		(unsigned long)location_to_obj(zspage->first_page, 0));
}
static void create_page_chain(struct zspage *zspage, struct page *pages[],
				int nr_pages)
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using page->next
	 * 2. each sub-page points to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set) and PG_private_2 to identify the last page.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
		} else {
			prev_page->next = page;
		}
		if (i == nr_pages - 1) {
			SetPagePrivate2(page);
			page->next = NULL;
		}
		prev_page = page;
	}
}
/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
				struct size_class *class,
				gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	memset(zspage, 0, sizeof(struct zspage));

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			while (--i >= 0)
				__free_page(pages[i]);
			cache_free_zspage(pool, zspage);
			return NULL;
		}
		pages[i] = page;
	}

	create_page_chain(zspage, pages, class->pages_per_zspage);
	init_zspage(class, zspage);

	return zspage;
}
static struct zspage *find_get_zspage(struct size_class *class)
{
	int i;
	struct zspage *zspage;

	for (i = ZS_ALMOST_FULL; i <= ZS_ALMOST_EMPTY; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
				struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;
}
#ifdef CONFIG_PGTABLE_MAPPING

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */
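/*
 * Editor's note, worked example: with 4K pages, an object of a 200-byte
 * class that starts at offset 4000 in its page spans two pages.  The
 * copy-based __zs_map_object() above then uses
 *
 *	sizes[0] = PAGE_SIZE - off = 96;
 *	sizes[1] = size - sizes[0] = 104;
 *
 * copying 96 bytes from the end of the first page and 104 bytes from the
 * start of the next page into the per-cpu vm_buf, so the caller sees one
 * contiguous buffer.
 */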
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}
static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}

static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	WARN_ON_ONCE(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (!class->huge)
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
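/*
 * Editor's note: an illustrative caller-side sketch of the API exported in
 * this file (zs_malloc/zs_free are defined further below).  The pool and
 * payload names are hypothetical and error handling is minimal; the block
 * is kept under "#if 0" so it is never built.
 */
#if 0
static int zs_usage_sketch(struct zs_pool *pool, const void *src, size_t len)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len, GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	/* map write-only, copy the payload in, then unmap promptly */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);

	/* ... later, when the object is no longer needed ... */
	zs_free(pool, handle);
	return 0;
}
#endif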
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);

	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_objidx, m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(zspage);
	obj_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, (unsigned long)link->next);
	if (!class->huge)
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle to page->index */
		zspage->first_page->index = handle;

	kunmap_atomic(vaddr);
	mod_zspage_inuse(zspage, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	return obj;
}
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	zspage = find_get_zspage(class);

	if (!zspage) {
		spin_unlock(&class->lock);
		zspage = alloc_zspage(pool, class, gfp);
		if (unlikely(!zspage)) {
			cache_free_handle(pool, handle);
			return 0;
		}

		set_zspage_mapping(zspage, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = obj_malloc(class, zspage, handle);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(class, zspage);
	record_obj(handle, obj);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
static void obj_free(struct size_class *class, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct page *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	zspage = get_zspage(f_page);

	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = (void *)get_freeobj(zspage);
	kunmap_atomic(vaddr);
	set_freeobj(zspage, obj);
	mod_zspage_inuse(zspage, -1);
	zs_stat_dec(class, OBJ_USED, 1);
}
void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *f_page;
	unsigned long obj, f_objidx;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	zspage = get_zspage(f_page);

	get_zspage_mapping(zspage, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(class, obj);
	fullness = fix_fullness_group(class, zspage);
	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(pool, zspage);
	}
	spin_unlock(&class->lock);
	unpin_tag(handle);

	cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned long s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
	d_off = obj_idx_to_offset(d_page, d_objidx, class->size);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}
/*
 * Find alloced object in zspage from index object and
 * return handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
					struct page *page, int index)
{
	unsigned long head;
	int offset = 0;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(class, page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
	}

	kunmap_atomic(addr);
	return handle;
}
struct zs_compact_control {
	/* Source page for migration which could be a subpage of zspage */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	 /* Starting object index within @s_page which used for live object
	  * in the subpage. */
	int index;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	unsigned long index = cc->index;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(class, s_page, index);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			index = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(class, get_zspage(d_page))) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(class, get_zspage(d_page), handle);
		zs_object_copy(class, free_obj, used_obj);
		index++;
		/*
		 * record_obj updates handle's value to free_obj and it will
		 * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
		 * breaks synchronization using pin_tag(e.g. zs_free) so
		 * let's keep the lock bit.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(class, used_obj);
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->index = index;

	return ret;
}
static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
	int i;
	struct zspage *zspage;
	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};

	if (!source) {
		fg[0] = ZS_ALMOST_FULL;
		fg[1] = ZS_ALMOST_EMPTY;
	}

	for (i = 0; i < 2; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage, fg[i]);
			return zspage;
		}
	}

	return zspage;
}
/*
 * putback_zspage - add @zspage into right class's fullness list
 * @class: destination class
 * @zspage: target page
 *
 * Return @zspage's fullness_group
 */
static enum fullness_group putback_zspage(struct size_class *class,
			struct zspage *zspage)
{
	enum fullness_group fullness;

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);

	return fullness;
}
/*
 * Based on the number of unused allocated objects calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= get_maxobj_per_zspage(class->size,
			class->pages_per_zspage);

	return obj_wasted * class->pages_per_zspage;
}
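/*
 * Editor's note, worked example: if a class holds 10 objects per zspage
 * spread over 4 pages, and currently has obj_allocated == 100 with
 * obj_used == 64, then obj_wasted == 36, i.e. 36 / 10 == 3 whole zspages
 * worth of dead space, so zs_can_compact() reports 3 * 4 == 12 freeable
 * pages.
 */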
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
	struct zs_compact_control cc;
	struct zspage *src_zspage;
	struct zspage *dst_zspage = NULL;

	spin_lock(&class->lock);
	while ((src_zspage = isolate_zspage(class, true))) {

		if (!zs_can_compact(class))
			break;

		cc.index = 0;
		cc.s_page = src_zspage->first_page;

		while ((dst_zspage = isolate_zspage(class, false))) {
			cc.d_page = dst_zspage->first_page;
			/*
			 * If there is no more space in dst_page, resched
			 * and see if anyone had allocated another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(class, dst_zspage);
		}

		/* Stop if we couldn't find slot */
		if (dst_zspage == NULL)
			break;

		putback_zspage(class, dst_zspage);
		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
			zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
					class->size, class->pages_per_zspage));
			atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
			free_zspage(pool, src_zspage);
			pool->stats.pages_compacted += class->pages_per_zspage;
		}
		spin_unlock(&class->lock);
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	spin_unlock(&class->lock);
}
unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		__zs_compact(pool, class);
	}

	return pool->stats.pages_compacted;
}
EXPORT_SYMBOL_GPL(zs_compact);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	pages_freed = pool->stats.pages_compacted;
	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool) - pages_freed;

	return pages_freed ? pages_freed : SHRINK_STOP;
}

static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	if (pool->shrinker_enabled) {
		unregister_shrinker(&pool->shrinker);
		pool->shrinker_enabled = false;
	}
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker.scan_objects = zs_shrinker_scan;
	pool->shrinker.count_objects = zs_shrinker_count;
	pool->shrinker.batch = 0;
	pool->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&pool->shrinker);
}
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool);
		return NULL;
	}

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate reversely, because, size of size_class that we want to use
	 * for merging should be larger or equal to current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;
		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = class->pages_per_zspage *
						PAGE_SIZE / class->size;
		if (pages_per_zspage == 1 && class->objs_per_zspage == 1)
			class->huge = true;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
		for (fullness = ZS_ALMOST_FULL; fullness <= ZS_ALMOST_EMPTY;
								fullness++)
			INIT_LIST_HEAD(&class->fullness_list[fullness]);

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical, we still can use the pool
	 * and user can trigger compaction manually.
	 */
	if (zs_register_shrinker(pool) == 0)
		pool->shrinker_enabled = true;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < zs_size_classes; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_ALMOST_FULL; fg <= ZS_ALMOST_EMPTY; fg++) {
			if (!list_empty(&class->fullness_list[fg])) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->size_class);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret)
		goto notifier_fail;

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

notifier_fail:
	zs_unregister_cpu_notifier();

	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");