/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zram. Thus, the allocator is
 * supposed to work well under low memory conditions. In particular, it
 * never attempts higher order page allocation which is very likely to
 * fail under memory pressure. On the other hand, if we just use single
 * (0-order) pages, it would suffer from very high fragmentation --
 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
 * This was one of the major issues with its predecessor (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
 * since this satisfies the requirements of all its current users (in the
 * worst case, page is incompressible and is thus stored "as-is" i.e. in
 * uncompressed form). For allocation requests larger than this size, failure
 * is returned (see zs_malloc).
 *
 * Additionally, zs_malloc() does not return a dereferenceable pointer.
 * Instead, it returns an opaque handle (unsigned long) which encodes actual
 * location of the allocated object. The reason for this indirection is that
 * zsmalloc does not keep zspages permanently mapped since that would cause
 * issues on 32-bit systems where the VA region for kernel space mappings
 * is very small. So, before using the allocated memory, the object has to
 * be mapped using zs_map_object() to get a usable pointer and subsequently
 * unmapped using zs_unmap_object().
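 *
 * A minimal usage sketch (illustrative only; names like "example" and the
 * source/destination buffers are hypothetical, and error handling is
 * trimmed):
 *
 *	struct zs_pool *pool = zs_create_pool("example", GFP_NOIO);
 *	unsigned long handle = zs_malloc(pool, len);
 *
 *	if (handle) {
 *		void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zs_unmap_object(pool, handle);
 *		zs_free(pool, handle);
 *	}
 *	zs_destroy_pool(pool);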
 *
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *		If the page is first_page for huge object, it stores handle.
 *		Look at size_class->huge.
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->zspage_order * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The memory allocated for a handle stores the object position by
 * encoding <page, obj_idx>, and the encoded value has spare room in its
 * least significant bit (see obj_to_location). We use that bit to
 * synchronize object access between the user and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * The head of an allocated object stores OBJ_ALLOCATED_TAG so we can
 * tell whether the object is allocated. It is safe to keep this status
 * bit in the least significant bit because the header stores a handle,
 * which is a 4-byte aligned address, leaving room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 *  (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
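
/*
 * Worked example (assuming 4K pages and ZS_MIN_ALLOC_SIZE == 32):
 * ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16 bytes, so class sizes run
 * 32, 48, 64, ..., 4096 and the class count comes out to
 * (4096 - 32) / 16 + 1 = 255, matching the note above.
 */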

/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
	NR_ZS_STAT_TYPE,
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif

/*
 * number of size_classes
 */
static int zs_size_classes;

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= 3 * N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > 3 * N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
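
/*
 * Worked example (illustrative): for a zspage that can hold N = 8
 * objects and f = 4, the ZS_ALMOST_EMPTY threshold is 3 * 8 / 4 = 6:
 * n == 0 is ZS_EMPTY, 1 <= n <= 6 is ZS_ALMOST_EMPTY, n == 7 is
 * ZS_ALMOST_FULL and n == 8 is ZS_FULL (see get_fullness_group()).
 */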

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
	bool huge;

#ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
#endif

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Position of next free chunk (encodes <PFN, obj_idx>)
		 * It's valid for non-allocated object
		 */
		void *next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
	bool huge;
};

static int create_handle_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	return pool->handle_cachep ? 0 : 1;
}

static void destroy_handle_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
}

static unsigned long alloc_handle(struct zs_pool *pool)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
		pool->flags & ~__GFP_HIGHMEM);
}

static void free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	/* Store the encoded object location in the handle's memory */
	*(unsigned long *)handle = obj;
}

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(name, gfp);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min(zs_size_classes - 1, idx);
}
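
/*
 * Example (assuming 4K pages, ZS_MIN_ALLOC_SIZE == 32 and
 * ZS_SIZE_CLASS_DELTA == 16): a request for 100 bytes maps to
 * idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the 112-byte class
 * (32 + 5 * 16), the smallest chunk size that can hold the request.
 */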

#ifdef CONFIG_ZSMALLOC_STAT

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}

static int __init zs_stat_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		return -ENOMEM;

	return 0;
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open           = zs_stats_size_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	struct dentry *entry;

	if (!zs_stat_root)
		return -ENODEV;

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return -ENOMEM;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		return -ENOMEM;
	}

	return 0;
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return 0;
}

static int __init zs_stat_init(void)
{
	return 0;
}

static void __exit zs_stat_exit(void)
{
}

static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	return 0;
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}

#endif /* CONFIG_ZSMALLOC_STAT */

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;

	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= 3 * max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
	zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct page *page)
{
	int class_idx;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
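/*
 * Worked numbers for the example above (assuming 4K pages): for
 * class_size = 3 * 4096 / 8 = 1536, a 1-page zspage wastes
 * 4096 % 1536 = 1024 bytes, a 2-page zspage wastes 8192 % 1536 = 512,
 * and a 3-page zspage (12288 bytes) wastes 12288 % 1536 = 0, fitting
 * exactly 8 objects; get_pages_per_zspage() below picks that order.
 */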
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * We use the least bit of handle for tagging.
 */
static void *location_to_obj(struct page *page, unsigned long obj_idx)
{
	unsigned long obj;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= ((obj_idx) & OBJ_INDEX_MASK);
	obj <<= OBJ_TAG_BITS;

	return (void *)obj;
}
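
/*
 * Illustrative layout of the encoded value built above (the handle
 * memory that stores it additionally uses HANDLE_PIN_BIT):
 *
 *	<------- PFN -------><--- obj_idx (OBJ_INDEX_BITS) ---><-tag->
 *
 * e.g. a page with PFN 0x1000 and obj_idx 3 encodes to
 * ((0x1000 << OBJ_INDEX_BITS) | 3) << OBJ_TAG_BITS.
 */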

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * location_to_obj().
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned long *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct size_class *class, struct page *page,
			void *obj)
{
	if (class->huge) {
		VM_BUG_ON(!is_first_page(page));
		return *(unsigned long *)page_private(page);
	} else
		return *(unsigned long *)obj;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}

static inline int trypin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
}

static void pin_tag(unsigned long handle)
{
	while (!trypin_tag(handle));
}

static void unpin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	clear_bit_unlock(HANDLE_PIN_BIT, ptr);
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = location_to_obj(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = location_to_obj(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = location_to_obj(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

#ifdef CONFIG_PGTABLE_MAPPING

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	if (!area->huge) {
		buf = buf + ZS_HANDLE_SIZE;
		size -= ZS_HANDLE_SIZE;
		off += ZS_HANDLE_SIZE;
	}

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}

static bool zspage_full(struct page *page)
{
	BUG_ON(!is_first_page(page));

	return page->inuse == page->objects;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (!class->huge)
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

static unsigned long obj_malloc(struct page *first_page,
		struct size_class *class, unsigned long handle)
{
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_objidx, m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = (unsigned long)first_page->freelist;
	obj_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
	if (!class->huge)
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle in first_page->private */
		set_page_private(first_page, handle);
	kunmap_atomic(vaddr);
	first_page->inuse++;
	zs_stat_inc(class, OBJ_USED, 1);

	return obj;
}

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long handle, obj;
	struct size_class *class;
	struct page *first_page;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = alloc_handle(pool);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];
	/* In huge class size, we store the handle into first_page->private */
	if (class->huge) {
		size -= ZS_HANDLE_SIZE;
		class = pool->size_class[get_size_class_index(size)];
	}

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page)) {
			free_handle(pool, handle);
			return 0;
		}

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = obj_malloc(first_page, class, handle);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(class, first_page);
	record_obj(handle, obj);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

static void obj_free(struct zs_pool *pool, struct size_class *class,
			unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;
	int class_idx;
	enum fullness_group fullness;

	BUG_ON(!obj);

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	if (class->huge)
		set_page_private(first_page, 0);
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;
	first_page->inuse--;
	zs_stat_dec(class, OBJ_USED, 1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct page *first_page, *f_page;
	unsigned long obj, f_objidx;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(pool, class, obj);
	fullness = fix_fullness_group(class, first_page);
	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
	spin_unlock(&class->lock);
	unpin_tag(handle);

	free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);

static void zs_object_copy(unsigned long src, unsigned long dst,
				struct size_class *class)
{
	struct page *s_page, *d_page;
	unsigned long s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
	d_off = obj_idx_to_offset(d_page, d_objidx, class->size);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		if (s_off + size >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			BUG_ON(!s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		} else {
			s_off += size;
			s_size -= size;
		}

		if (d_off + size >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			BUG_ON(!d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		} else {
			d_off += size;
			d_size -= size;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find an allocated object in the zspage, scanning from object index
 * @index, and return its handle.
 */
static unsigned long find_alloced_obj(struct page *page, int index,
					struct size_class *class)
{
	unsigned long head;
	int offset = 0;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	if (!is_first_page(page))
		offset = page->index;
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(class, page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);
	return handle;
}

struct zs_compact_control {
	/* Source page for migration which could be a subpage of zspage. */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	/* Starting object index within @s_page from which to scan for
	 * live objects in the subpage. */
	int index;
	/* How many objects have been migrated */
	int nr_migrated;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	unsigned long index = cc->index;
	int nr_migrated = 0;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(s_page, index, class);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			index = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(d_page)) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(d_page, class, handle);
		zs_object_copy(used_obj, free_obj, class);
		index++;
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(pool, class, used_obj);
		nr_migrated++;
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->index = index;
	cc->nr_migrated = nr_migrated;

	return ret;
}

static struct page *alloc_target_page(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page) {
			remove_zspage(page, class, i);
			break;
		}
	}

	return page;
}
*pool
, struct size_class
*class,
1708 struct page
*first_page
)
1711 enum fullness_group fullness
;
1713 BUG_ON(!is_first_page(first_page
));
1715 get_zspage_mapping(first_page
, &class_idx
, &fullness
);
1716 insert_zspage(first_page
, class, fullness
);
1717 fullness
= fix_fullness_group(class, first_page
);
1718 if (fullness
== ZS_EMPTY
) {
1719 zs_stat_dec(class, OBJ_ALLOCATED
, get_maxobj_per_zspage(
1720 class->size
, class->pages_per_zspage
));
1721 atomic_long_sub(class->pages_per_zspage
,
1722 &pool
->pages_allocated
);
1724 free_zspage(first_page
);
1728 static struct page
*isolate_source_page(struct size_class
*class)
1732 page
= class->fullness_list
[ZS_ALMOST_EMPTY
];
1734 remove_zspage(page
, class, ZS_ALMOST_EMPTY
);

static unsigned long __zs_compact(struct zs_pool *pool,
				struct size_class *class)
{
	int nr_to_migrate;
	struct zs_compact_control cc;
	struct page *src_page;
	struct page *dst_page = NULL;
	unsigned long nr_total_migrated = 0;

	spin_lock(&class->lock);
	while ((src_page = isolate_source_page(class))) {

		BUG_ON(!is_first_page(src_page));

		/* The goal is to migrate all live objects in source page */
		nr_to_migrate = src_page->inuse;
		cc.index = 0;
		cc.s_page = src_page;

		while ((dst_page = alloc_target_page(class))) {
			cc.d_page = dst_page;
			/*
			 * If there is no more space in dst_page, try to
			 * allocate another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(pool, class, dst_page);
			nr_total_migrated += cc.nr_migrated;
			nr_to_migrate -= cc.nr_migrated;
		}

		/* Stop if we couldn't find slot */
		if (dst_page == NULL)
			break;

		putback_zspage(pool, class, dst_page);
		putback_zspage(pool, class, src_page);
		spin_unlock(&class->lock);
		nr_total_migrated += cc.nr_migrated;
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_page)
		putback_zspage(pool, class, src_page);

	spin_unlock(&class->lock);

	return nr_total_migrated;
}

unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	unsigned long nr_migrated = 0;
	struct size_class *class;

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		nr_migrated += __zs_compact(pool, class);
	}

	return nr_migrated;
}
EXPORT_SYMBOL_GPL(zs_compact);
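
/*
 * Illustrative call site (hypothetical): a sysfs knob or shrinker could
 * trigger compaction with
 *
 *	unsigned long nr = zs_compact(pool);
 *
 * where the return value is the total number of objects migrated while
 * walking every size class.
 */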

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(char *name, gfp_t flags)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool);
		return NULL;
	}

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_handle_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because, size of size_class that we want to use
	 * for merging should be larger or equal to current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
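		/*
		 * Illustrative merge (assuming 4K pages): the classes for
		 * sizes 4080 and 4096 both use a 1-page zspage holding a
		 * single object, so can_merge() lets them share one
		 * size_class rather than keeping two nearly identical ones.
		 */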
		if (prev_class) {
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		if (pages_per_zspage == 1 &&
			get_maxobj_per_zspage(size, pages_per_zspage) == 1)
			class->huge = true;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		prev_class = class;
	}

	pool->flags = flags;

	if (zs_pool_stat_create(name, pool))
		goto err;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_pool_stat_destroy(pool);

	for (i = 0; i < zs_size_classes; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_handle_cache(pool);
	kfree(pool->size_class);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret)
		goto notifier_fail;

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	ret = zs_stat_init();
	if (ret) {
		pr_err("zs stat initialization failed\n");
		goto stat_fail;
	}

	return 0;

stat_fail:
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
notifier_fail:
	zs_unregister_cpu_notifier();

	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");