/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, but it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free(),
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock()
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj))	// might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) {	// not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
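
/*
 * Illustrative sketch (not part of the original header): creating a cache
 * suitable for the lockless lookup pattern above. The struct, cache name
 * and reference-counting machinery are hypothetical.
 *
 *	struct conn {
 *		unsigned long	key;
 *		atomic_t	refcnt;
 *	};
 *
 *	static struct kmem_cache *conn_cachep;
 *
 *	conn_cachep = kmem_cache_create("conn_cache", sizeof(struct conn),
 *					0, SLAB_DESTROY_BY_RCU, NULL);
 *
 * Objects freed with kmem_cache_free() may be reused immediately, but the
 * backing page is only returned to the system after an RCU grace period,
 * which is what makes the re-validation pass above safe.
 */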

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

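/*
 * Illustrative sketch (not part of the original header): a zero-sized
 * allocation yields ZERO_SIZE_PTR, which must not be dereferenced but
 * may be passed to kfree() just like NULL.
 *
 *	void *p = kmalloc(0, GFP_KERNEL);
 *
 *	BUG_ON(!ZERO_OR_NULL_PTR(p));	// p is ZERO_SIZE_PTR here
 *	kfree(p);			// no-op, perfectly valid
 */
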
#include <linux/kmemleak.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

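/*
 * Illustrative lifecycle sketch (not part of the original header); the
 * struct and names are hypothetical:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */
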
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you,
 * e.g., add ____cacheline_aligned_in_smp to the struct declaration,
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

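/*
 * Illustrative sketch (not part of the original header); the struct name
 * is hypothetical:
 *
 *	struct foo_ctx {
 *		unsigned long state;
 *	} ____cacheline_aligned_in_smp;
 *
 *	foo_ctx_cachep = KMEM_CACHE(foo_ctx, SLAB_PANIC);
 *
 * which expands to:
 *
 *	kmem_cache_create("foo_ctx", sizeof(struct foo_ctx),
 *			  __alignof__(struct foo_ctx), SLAB_PANIC, NULL);
 */
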
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB serves requests of up to two pages (order 1) directly from its
 * kmalloc caches and otherwise passes the request to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests of page size and larger to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_MAX	30
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
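
/*
 * Illustrative examples (not part of the original header), assuming
 * KMALLOC_MIN_SIZE == 8 (i.e. KMALLOC_SHIFT_LOW == 3):
 *
 *	kmalloc_index(0)   == 0		// zero alloc, ZERO_SIZE_PTR
 *	kmalloc_index(8)   == 3		// 8-byte cache
 *	kmalloc_index(70)  == 1		// odd-sized 96-byte cache
 *	kmalloc_index(100) == 7		// 128-byte cache
 *	kmalloc_index(200) == 8		// 256-byte cache
 */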

void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

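/*
 * Illustrative examples (not part of the original header), assuming
 * KMALLOC_MIN_SIZE <= 32:
 *
 *	kmalloc_size(1)  == 96		// the odd-sized 96-byte cache
 *	kmalloc_size(2)  == 192		// the odd-sized 192-byte cache
 *	kmalloc_size(7)  == 128		// 1 << 7
 *	kmalloc_size(12) == 4096	// 1 << 12
 */
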
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

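/*
 * Illustrative sketch (not part of the original header): allocating
 * memory local to the node a given CPU belongs to; "cpu" is assumed to
 * hold a valid CPU number.
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
 */
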
/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled gets bigger than
 * it would be if this were bundled in kmem_cache: we'll need an extra
 * pointer chase. But the trade-off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags);

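/*
 * Illustrative usage sketch (not part of the original header); the struct
 * is hypothetical:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */
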
/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

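/*
 * Illustrative note (not part of the original header): the check above
 * turns a would-be wrapping multiplication into a NULL return. On a
 * 64-bit machine:
 *
 *	kmalloc_array(SIZE_MAX / 4 + 1, 4, GFP_KERNEL)	// returns NULL
 *
 * whereas the bare multiplication n * size would silently wrap around
 * (here to 0) and request far too small an allocation.
 */
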
/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * caller of the routine that invokes it for slab leak tracking, rather
 * than that routine itself (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
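
/*
 * Illustrative sketch (not part of the original header): a wrapper in
 * the style of kstrdup(), whose allocations get attributed to the
 * wrapper's caller. The wrapper name is hypothetical.
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *buf = kmalloc_track_caller(len, gfp);
 *
 *		if (buf)
 *			memcpy(buf, s, len);
 *		return buf;	// leak reports blame my_strdup()'s caller
 *	}
 */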

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the caller of the routine that invokes it for slab leak
 * tracking, rather than that routine itself (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */