/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _ZS_MALLOC_INT_H_
#define _ZS_MALLOC_INT_H_

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines the upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER	2
#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

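/*
 * For example: with ZS_MAX_ZSPAGE_ORDER == 2, a zspage links together at
 * most 1 << 2 == 4 single pages, i.e. at most 16KB of memory on systems
 * with 4K pages.
 */
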
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (void *) handle value.
 *
 * Note that object index <obj_idx> is relative to the system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS	36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS	BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS		(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK		((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

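/*
 * Illustrative sketch (assumption: the handle packs the PFN into the upper
 * _PFN_BITS and the object index into the lower OBJ_INDEX_BITS). The
 * hypothetical helpers below show how such an encoding could be done; the
 * real conversion routines live in the allocator implementation.
 */
#if 0	/* example only, never compiled */
static inline void *example_location_to_handle(unsigned long pfn,
					       unsigned long obj_idx)
{
	/* PFN in the high bits, object index in the low OBJ_INDEX_BITS */
	return (void *)((pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK));
}

static inline void example_handle_to_location(void *handle,
					      unsigned long *pfn,
					      unsigned long *obj_idx)
{
	unsigned long val = (unsigned long)handle;

	*pfn = val >> OBJ_INDEX_BITS;
	*obj_idx = val & OBJ_INDEX_MASK;
}
#endif
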
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be a multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
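/*
 * Worked example (assuming 4K pages): with the BITS_PER_LONG fallback
 * above, OBJ_INDEX_BITS == PAGE_SHIFT, so the second term is
 * (4 << 12) >> 12 == 4 and ZS_MIN_ALLOC_SIZE == MAX(32, 4) == 32.
 * With CONFIG_HIGHMEM64G on 32-bit, OBJ_INDEX_BITS == 32 - 24 == 8, the
 * second term is (4 << 12) >> 8 == 64 and ZS_MIN_ALLOC_SIZE == 64.
 */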
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives up to 255 size classes! There is
 * a trade-off here:
 *  - A large number of size classes is potentially wasteful as free pages
 *    are spread across these classes
 *  - A small number of size classes causes large internal fragmentation
 *  - It is probably better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiples of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN
 * (see note above).
 */
#define ZS_SIZE_CLASS_DELTA	16
#define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
					ZS_SIZE_CLASS_DELTA + 1)
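
/*
 * Worked example (assuming 4K pages and ZS_MIN_ALLOC_SIZE == 32):
 * (4096 - 32) / 16 + 1 == 255 size classes.
 */
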
/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

/*
 * We assign a zspage to the ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects the zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign a zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

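/*
 * Worked example: a zspage of class size 32 backed by a single 4K page
 * holds N == 128 objects. With fullness_threshold_frac == 4 it stays
 * ZS_ALMOST_EMPTY while n <= 128 / 4 == 32, and becomes ZS_ALMOST_FULL
 * once n > 32 (until it is completely full).
 */
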
struct mapping_area {
	char *vm_buf; /* copy buffer for objects that span pages */
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

struct size_class {
	/*
	 * Size of objects stored in this class. Must be a multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	spinlock_t lock;

	/* stats */
	u64 pages_allocated;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};
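
/*
 * Illustrative sketch: size classes are spaced ZS_SIZE_CLASS_DELTA bytes
 * apart starting at ZS_MIN_ALLOC_SIZE, so a requested size could be mapped
 * to a class index with the hypothetical helper below. The real lookup is
 * done by the allocator implementation.
 */
#if 0	/* example only, never compiled */
static inline int example_get_size_class_index(int size)
{
	if (size <= ZS_MIN_ALLOC_SIZE)
		return 0;

	/* round up so each class covers one ZS_SIZE_CLASS_DELTA step */
	return DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA);
}
#endif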

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
	void *next;
};

struct zs_pool {
	struct size_class size_class[ZS_SIZE_CLASSES];

	gfp_t flags;	/* allocation flags used when growing pool */
	const char *name;
};

#endif