#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
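
/*
 * Illustrative sketch (not API documentation): because the inline
 * kmalloc() below is evaluated at compile time for constant sizes,
 * a call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * reduces to roughly kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags)
 * for the smallest general cache whose cs_size is >= 64, with no
 * runtime size lookup.
 */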

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' above contains
	 * the total object size including these internal fields;
	 * 'obj_offset' below is the offset to the start of the user
	 * object, and 'object_size' above is its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[]
	 */
};

/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
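
/*
 * Illustrative sketch of how malloc_sizes[] lines up with the CACHE()
 * entries in <linux/kmalloc_sizes.h>. The exact table depends on
 * PAGE_SIZE and L1_CACHE_BYTES; the values below are an assumption
 * for a typical 4K-page config, not a guaranteed layout:
 *
 *	malloc_sizes[0].cs_size == 32
 *	malloc_sizes[1].cs_size == 64
 *	...
 *
 * (some entries, e.g. 96 and 192, are only present when
 * L1_CACHE_BYTES is small enough). The inline kmalloc() and
 * kmalloc_node() below walk the CACHE() entries in the same order,
 * so the index i they compute matches this table.
 */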

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
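
/*
 * Usage sketch (hypothetical caller, shown for illustration only):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *
 * Since sizeof(*f) is a compile-time constant, this takes the inlined
 * fast path above; a runtime-sized request falls through to
 * __kmalloc() instead.
 */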

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
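
/*
 * NUMA usage sketch (hypothetical caller, for illustration): allocate
 * memory close to the calling CPU's node; non-constant sizes fall
 * through to __kmalloc_node() as above:
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, numa_node_id());
 */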

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */