/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zcache and zram. Thus, the
 * allocator is supposed to work well under low memory conditions. In
 * particular, it never attempts higher-order page allocation, which is
 * very likely to fail under memory pressure. On the other hand, if we
 * just use single (0-order) pages, it would suffer from very high
 * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
 * an entire page. This was one of the major issues with its predecessor
 * (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page, i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called a zspage.
 *
 * The following describes how we use various fields and flags of the
 * underlying struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in the zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in the zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->pages_per_zspage * PAGE_SIZE / class->size)
 *	page->lru: links together the first pages of various zspages.
 *		Basically forming a list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 */
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>

#include "zsmalloc_int.h"
/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
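
/*
 * Illustrative example (not from the original source): with the encoding
 * used by set_zspage_mapping() below, class_idx = 5 and fullness = 2 pack
 * into page->mapping as m = (5 << FULLNESS_BITS) | 2 = 0x52;
 * get_zspage_mapping() then recovers 2 from the low 4 bits and 5 from the
 * next 28 bits.
 */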
/*
 * By default, zsmalloc uses a copy-based object mapping method to access
 * allocations that span two pages. However, if a particular architecture
 * 1) implements local_flush_tlb_kernel_range() and 2) performs VM mapping
 * faster than copying, then it should be added here so that
 * USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use page table
 * mapping rather than copying for object mapping.
 */
#if defined(CONFIG_ARM)
#define USE_PGTABLE_MAPPING
#endif
struct mapping_area {
#ifdef USE_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}
static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
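
/*
 * Worked example (illustrative; the actual ZS_MIN_ALLOC_SIZE and
 * ZS_SIZE_CLASS_DELTA values live in zsmalloc_int.h): assuming
 * ZS_MIN_ALLOC_SIZE = 32 and ZS_SIZE_CLASS_DELTA = 16, a request of
 * size 100 maps to idx = DIV_ROUND_UP(100 - 32, 16) = 5.
 */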
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}
/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 * where Zp = zspage size = k * PAGE_SIZE for some k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
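
/*
 * Worked example (illustrative; assumes PAGE_SIZE = 4096 and
 * ZS_MAX_PAGES_PER_ZSPAGE >= 3): for class_size = 1536 (3/8 * PAGE_SIZE),
 *	i = 1: waste = 4096 % 1536 = 1024, usedpc = 75
 *	i = 2: waste = 8192 % 1536 =  512, usedpc = 93
 *	i = 3: waste = 12288 % 1536 =   0, usedpc = 100
 * so get_pages_per_zspage(1536) picks 3 pages per zspage.
 */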
/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}
static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page->private;
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}
/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= (obj_idx & OBJ_INDEX_MASK);

	return (void *)handle;
}
/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = handle & OBJ_INDEX_MASK;
}
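
/*
 * Illustrative round-trip (values hypothetical; the real OBJ_INDEX_BITS
 * comes from zsmalloc_int.h): with OBJ_INDEX_BITS = 12, a page with
 * pfn 0x1000 and obj_idx 5 encode as (0x1000 << 12) | 5 = 0x1000005, and
 * obj_handle_to_location() recovers both fields from that handle.
 */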
static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	reset_page_mapcount(page);
}
static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present).
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
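
/*
 * Illustrative walk-through of the above (assuming PAGE_SIZE = 4096 and
 * class->size = 1536): in the first page, init_zspage() writes a link at
 * offset 0 pointing to object 1 and a link at offset 1536 pointing to
 * object 2; the link at offset 3072 belongs to an object that spans into
 * the second page, so it points to object 0 of the next page, which
 * begins at offset (3072 + 1536) % 4096 = 512 there.
 */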
/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			first_page->private = (unsigned long)page;
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}
#ifdef USE_PGTABLE_MAPPING

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}
static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}
static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;
	unsigned long end = addr + (PAGE_SIZE * 2);

	flush_cache_vunmap(addr, end);
	unmap_kernel_range_noflush(addr, PAGE_SIZE * 2);
	local_flush_tlb_kernel_range(addr, end);
}

#else /* USE_PGTABLE_MAPPING */
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm_buf)
		free_page((unsigned long)area->vm_buf);
	area->vm_buf = NULL;
}
static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}
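
/*
 * Example of the split above (illustrative): with PAGE_SIZE = 4096,
 * off = 4000 and size = 200, sizes[0] = 96 bytes come from the end of
 * pages[0] and sizes[1] = 104 bytes from the start of pages[1], so the
 * object appears contiguous in the per-cpu buffer.
 */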
static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* USE_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};
static void zs_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
}
static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			goto fail;
	}
	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}
struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	if (!name)
		return NULL;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;

		class = &pool->size_class[i];
		class->size = size;
		class->index = i;
		spin_lock_init(&class->lock);
		class->pages_per_zspage = get_pages_per_zspage(size);
	}

	pool->flags = flags;
	pool->name = name;

	return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = &pool->size_class[i];

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->pages_per_zspage;
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);
void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->pages_per_zspage;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
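
/*
 * Illustrative usage of the API above (a sketch, not part of the original
 * file; error handling is omitted and the source buffer 'src' is assumed):
 *
 *	struct zs_pool *pool = zs_create_pool("example", GFP_KERNEL);
 *	unsigned long handle = zs_malloc(pool, 128);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src, 128);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */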
u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	u64 npages = 0;
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");