/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private: refers to the component page after the first page
 *		If the page is first_page for huge object, it stores handle.
 *		Look at size_class->huge.
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *	page->inuse: the number of objects that are used in this zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER	2
#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */
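/*
 * Illustrative example (not part of the original source): on a 64-bit
 * system with 4K pages and the fallback MAX_PHYSMEM_BITS == BITS_PER_LONG,
 * _PFN_BITS is 64 - 12 == 52 and OBJ_INDEX_BITS is 64 - 52 - 1 == 11,
 * so OBJ_INDEX_MASK is 0x7ff. location_to_obj() then packs a location as
 *
 *	obj = ((pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK))
 *			<< OBJ_TAG_BITS;
 *
 * e.g. pfn == 0x1000, obj_idx == 5 gives obj == ((0x1000 << 11) | 5) << 1,
 * and obj_to_location() simply reverses the shift and mask steps.
 */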
#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * Memory allocated for a handle keeps the object position by
 * encoding <page, obj_idx>, and the encoded value has room in its
 * least significant bit (see obj_to_location).
 * We use that bit to synchronize between object access by
 * user and migration.
 */
#define HANDLE_PIN_BIT	0
/*
 * Head in allocated object should have OBJ_ALLOCATED_TAG
 * to identify whether the object is allocated.
 * It's okay to add the status bit in the least significant bit because
 * the header keeps a handle, which is a 4-byte-aligned address, so we
 * have room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
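/*
 * Illustrative numbers (not part of the original source): on a 64-bit
 * system with 4K pages, ZS_MAX_PAGES_PER_ZSPAGE == 4 and
 * OBJ_INDEX_BITS == 11, so (4 << 12) >> 11 == 8 and ZS_MIN_ALLOC_SIZE
 * becomes the MAX() fallback of 32 bytes. ZS_MAX_ALLOC_SIZE is 4096 bytes
 * and ZS_SIZE_CLASS_DELTA below is 4096 >> 8 == 16 bytes, giving
 * (4096 - 32) / 16 + 1 == 255 size classes.
 */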
/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,
	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
};

#ifdef CONFIG_ZSMALLOC_STAT
#define NR_ZS_STAT_TYPE	(CLASS_ALMOST_EMPTY + 1)
#else
#define NR_ZS_STAT_TYPE	(OBJ_USED + 1)
#endif
struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};
#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

/*
 * number of size_classes
 */
static int zs_size_classes;
/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
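/*
 * Illustrative example (not part of the original source): with
 * fullness_threshold_frac == 4 and a zspage that can hold N == 8 objects,
 * get_fullness_group() below reports ZS_EMPTY for n == 0, ZS_FULL for
 * n == 8, ZS_ALMOST_EMPTY for 0 < n <= 3 * 8 / 4 == 6 and ZS_ALMOST_FULL
 * for n == 7.
 */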
struct size_class {
	spinlock_t lock;
	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	unsigned int index;

	struct zs_size_stat stats;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
	bool huge;
};
/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Position of next free chunk (encodes <PFN, obj_idx>)
		 * It's valid for non-allocated object
		 */
		void *next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};
struct zs_pool {
	const char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;
	/*
	 * To signify that register_shrinker() was successful
	 * and unregister_shrinker() will not Oops.
	 */
	bool shrinker_enabled;
#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};
/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define FULLNESS_BITS	4
#define CLASS_BITS	28

#define FULLNESS_SHIFT	0
#define CLASS_SHIFT	(FULLNESS_SHIFT + FULLNESS_BITS)

#define FULLNESS_MASK	((1UL << FULLNESS_BITS) - 1)
#define CLASS_MASK	((1UL << CLASS_BITS) - 1)
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};
static int create_handle_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	return pool->handle_cachep ? 0 : 1;
}

static void destroy_handle_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
}

static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
		gfp & ~__GFP_HIGHMEM);
}

static void free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}
static void record_obj(unsigned long handle, unsigned long obj)
{
	/*
	 * The lsb of @obj represents the handle lock while the other bits
	 * represent the object value the handle is pointing to, so
	 * updating shouldn't do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
}
#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp,
			     const struct zpool_ops *zpool_ops,
			     struct zpool *zpool)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.owner =	THIS_MODULE,
	.type =		"zsmalloc",
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static inline int get_zspage_inuse(struct page *first_page)
{
	return first_page->inuse;
}
static inline void set_zspage_inuse(struct page *first_page, int val)
{
	first_page->inuse = val;
}

static inline void mod_zspage_inuse(struct page *first_page, int val)
{
	first_page->inuse += val;
}

static inline int get_first_obj_offset(struct page *page)
{
	return page->index;
}

static inline void set_first_obj_offset(struct page *page, int offset)
{
	page->index = offset;
}

static inline unsigned long get_freeobj(struct page *first_page)
{
	return (unsigned long)first_page->freelist;
}

static inline void set_freeobj(struct page *first_page, unsigned long obj)
{
	first_page->freelist = (void *)obj;
}
static void get_zspage_mapping(struct page *first_page,
				unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	m = (unsigned long)first_page->mapping;
	*fullness = (m >> FULLNESS_SHIFT) & FULLNESS_MASK;
	*class_idx = (m >> CLASS_SHIFT) & CLASS_MASK;
}

static void set_zspage_mapping(struct page *first_page,
				unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	m = (class_idx << CLASS_SHIFT) | (fullness << FULLNESS_SHIFT);
	first_page->mapping = (struct address_space *)m;
}
/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min(zs_size_classes - 1, idx);
}
static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	if (type < NR_ZS_STAT_TYPE)
		class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	if (type < NR_ZS_STAT_TYPE)
		class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	if (type < NR_ZS_STAT_TYPE)
		return class->stats.objs[type];
	return 0;
}
#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);
static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage, freeable);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages, "", total_freeable);

	return 0;
}
static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open           = zs_stats_size_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	struct dentry *entry;

	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		debugfs_remove_recursive(pool->stat_dentry);
		pool->stat_dentry = NULL;
	}
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}
#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif
/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct size_class *class,
						struct page *first_page)
{
	int inuse, objs_per_zspage;
	enum fullness_group fg;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	inuse = get_zspage_inuse(first_page);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == objs_per_zspage)
		fg = ZS_FULL;
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				enum fullness_group fullness,
				struct page *first_page)
{
	struct page **head;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);

	head = &class->fullness_list[fullness];
	if (!*head) {
		*head = first_page;
		return;
	}

	/*
	 * We want to see more ZS_FULL pages and fewer almost
	 * empty/full ones. Put pages with higher ->inuse first.
	 */
	list_add_tail(&first_page->lru, &(*head)->lru);
	if (get_zspage_inuse(first_page) >= get_zspage_inuse(*head))
		*head = first_page;
}
/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class,
				enum fullness_group fullness,
				struct page *first_page)
{
	struct page **head;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	VM_BUG_ON_PAGE(!*head, first_page);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == first_page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&first_page->lru);
	zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}
/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct page *first_page)
{
	unsigned int class_idx;
	enum fullness_group currfg, newfg;

	get_zspage_mapping(first_page, &class_idx, &currfg);
	newfg = get_fullness_group(class, first_page);
	if (newfg == currfg)
		goto out;

	remove_zspage(class, currfg, first_page);
	insert_zspage(class, newfg, first_page);
	set_zspage_mapping(first_page, class_idx, newfg);

out:
	return newfg;
}
/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return (struct page *)page_private(page);
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}
/*
 * Encode <page, obj_idx> as a single handle value.
 * We use the least bit of handle for tagging.
 */
static void *location_to_obj(struct page *page, unsigned long obj_idx)
{
	unsigned long obj;

	if (!page) {
		VM_BUG_ON(obj_idx);
		return NULL;
	}

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= ((obj_idx) & OBJ_INDEX_MASK);
	obj <<= OBJ_TAG_BITS;

	return (void *)obj;
}

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * location_to_obj().
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned long *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}
static unsigned long obj_to_head(struct size_class *class, struct page *page,
					void *obj)
{
	if (class->huge) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		return page_private(page);
	} else
		return *(unsigned long *)obj;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = get_first_obj_offset(page);

	return off + obj_idx * class_size;
}
static inline int trypin_tag(unsigned long handle)
{
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void pin_tag(unsigned long handle)
{
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void unpin_tag(unsigned long handle)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	VM_BUG_ON_PAGE(get_zspage_inuse(first_page), first_page);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct page *first_page)
{
	unsigned long off = 0;
	struct page *page = first_page;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = location_to_obj(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = location_to_obj(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}
}
/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->private
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			set_zspage_inuse(first_page, 0);
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			set_page_private(page, (unsigned long)first_page);
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(class, first_page);

	set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0));
	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
*find_get_zspage(struct size_class
*class)
1065 for (i
= 0; i
< _ZS_NR_FULLNESS_GROUPS
; i
++) {
1066 page
= class->fullness_list
[i
];
#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};
static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}
static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}
static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}
static bool zspage_full(struct size_class *class, struct page *first_page)
{
	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);

	return get_zspage_inuse(first_page) == class->objs_per_zspage;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	WARN_ON_ONCE(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (!class->huge)
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);

	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
static unsigned long obj_malloc(struct size_class *class,
				struct page *first_page, unsigned long handle)
{
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_objidx, m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(first_page);
	obj_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(first_page, (unsigned long)link->next);
	if (!class->huge)
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle in first_page->private */
		set_page_private(first_page, handle);
	kunmap_atomic(vaddr);
	mod_zspage_inuse(first_page, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	return obj;
}
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	struct page *first_page;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, gfp);
		if (unlikely(!first_page)) {
			free_handle(pool, handle);
			return 0;
		}

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = obj_malloc(class, first_page, handle);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(class, first_page);
	record_obj(handle, obj);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
static void obj_free(struct size_class *class, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = (void *)get_freeobj(first_page);
	if (class->huge)
		set_page_private(first_page, 0);
	kunmap_atomic(vaddr);
	set_freeobj(first_page, obj);
	mod_zspage_inuse(first_page, -1);
	zs_stat_dec(class, OBJ_USED, 1);
}
void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct page *first_page, *f_page;
	unsigned long obj, f_objidx;
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(class, obj);
	fullness = fix_fullness_group(class, first_page);
	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
	spin_unlock(&class->lock);
	unpin_tag(handle);

	free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned long s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
	d_off = obj_idx_to_offset(d_page, d_objidx, class->size);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}
/*
 * Find alloced object in zspage from index object and
 * return handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
					struct page *page, int index)
{
	unsigned long head;
	int offset = 0;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	if (!is_first_page(page))
		offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(class, page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);
	return handle;
}
struct zs_compact_control {
	/* Source page for migration which could be a subpage of zspage. */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	 /* Starting object index within @s_page which used for live object
	  * in the subpage. */
	int index;
};
static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	unsigned long index = cc->index;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(class, s_page, index);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			index = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(class, d_page)) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(class, d_page, handle);
		zs_object_copy(class, free_obj, used_obj);
		index++;
		/*
		 * record_obj updates handle's value to free_obj and it will
		 * invalidate the lock bit (i.e. HANDLE_PIN_BIT) of handle, which
		 * breaks synchronization using pin_tag (e.g. zs_free), so
		 * let's keep the lock bit.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(class, used_obj);
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->index = index;

	return ret;
}
*isolate_target_page(struct size_class
*class)
1700 for (i
= 0; i
< _ZS_NR_FULLNESS_GROUPS
; i
++) {
1701 page
= class->fullness_list
[i
];
1703 remove_zspage(class, i
, page
);
/*
 * putback_zspage - add @first_page into right class's fullness list
 * @pool: target pool
 * @class: destination class
 * @first_page: target page
 *
 * Return @first_page's fullness_group
 */
static enum fullness_group putback_zspage(struct zs_pool *pool,
				struct size_class *class,
				struct page *first_page)
{
	enum fullness_group fullness;

	fullness = get_fullness_group(class, first_page);
	insert_zspage(class, fullness, first_page);
	set_zspage_mapping(first_page, class->index, fullness);

	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
			class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);

		free_zspage(first_page);
	}

	return fullness;
}
static struct page *isolate_source_page(struct size_class *class)
{
	int i;
	struct page *page = NULL;

	for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
		page = class->fullness_list[i];
		if (!page)
			continue;

		remove_zspage(class, i, page);
		break;
	}

	return page;
}
/*
 * Based on the number of unused allocated objects calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= get_maxobj_per_zspage(class->size,
			class->pages_per_zspage);

	return obj_wasted * class->pages_per_zspage;
}
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
	struct zs_compact_control cc;
	struct page *src_page;
	struct page *dst_page = NULL;

	spin_lock(&class->lock);
	while ((src_page = isolate_source_page(class))) {

		if (!zs_can_compact(class))
			break;

		cc.index = 0;
		cc.s_page = src_page;

		while ((dst_page = isolate_target_page(class))) {
			cc.d_page = dst_page;
			/*
			 * If there is no more space in dst_page, resched
			 * and see if anyone had allocated another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(pool, class, dst_page);
		}

		/* Stop if we couldn't find slot */
		if (dst_page == NULL)
			break;

		putback_zspage(pool, class, dst_page);
		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
			pool->stats.pages_compacted += class->pages_per_zspage;
		spin_unlock(&class->lock);
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_page)
		putback_zspage(pool, class, src_page);

	spin_unlock(&class->lock);
}
unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		__zs_compact(pool, class);
	}

	return pool->stats.pages_compacted;
}
EXPORT_SYMBOL_GPL(zs_compact);
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	pages_freed = pool->stats.pages_compacted;
	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool) - pages_freed;

	return pages_freed ? pages_freed : SHRINK_STOP;
}
static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}
static void zs_unregister_shrinker(struct zs_pool *pool)
{
	if (pool->shrinker_enabled) {
		unregister_shrinker(&pool->shrinker);
		pool->shrinker_enabled = false;
	}
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker.scan_objects = zs_shrinker_scan;
	pool->shrinker.count_objects = zs_shrinker_count;
	pool->shrinker.batch = 0;
	pool->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&pool->shrinker);
}
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool);
		return NULL;
	}

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_handle_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size of a size_class that we want
	 * to use for merging should be larger or equal to the current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = class->pages_per_zspage *
						PAGE_SIZE / class->size;
		if (pages_per_zspage == 1 && class->objs_per_zspage == 1)
			class->huge = true;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical, we still can use the pool
	 * and user can trigger compaction manually.
	 */
	if (zs_register_shrinker(pool) == 0)
		pool->shrinker_enabled = true;
	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < zs_size_classes; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_handle_cache(pool);
	kfree(pool->size_class);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret)
		goto notifier_fail;

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

notifier_fail:
	zs_unregister_cpu_notifier();

	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");