#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <trace/kmemtrace.h>
19 /* Size description struct for general caches. */
22 struct kmem_cache
*cs_cachep
;
23 #ifdef CONFIG_ZONE_DMA
24 struct kmem_cache
*cs_dmacachep
;
27 extern struct cache_sizes malloc_sizes
[];
29 void *kmem_cache_alloc(struct kmem_cache
*, gfp_t
);
30 void *__kmalloc(size_t size
, gfp_t flags
);
32 #ifdef CONFIG_KMEMTRACE
33 extern void *kmem_cache_alloc_notrace(struct kmem_cache
*cachep
, gfp_t flags
);
34 extern size_t slab_buffer_size(struct kmem_cache
*cachep
);
36 static __always_inline
void *
37 kmem_cache_alloc_notrace(struct kmem_cache
*cachep
, gfp_t flags
)
39 return kmem_cache_alloc(cachep
, flags
);
41 static inline size_t slab_buffer_size(struct kmem_cache
*cachep
)
47 static __always_inline
void *kmalloc(size_t size
, gfp_t flags
)
49 struct kmem_cache
*cachep
;
52 if (__builtin_constant_p(size
)) {
63 #include <linux/kmalloc_sizes.h>
66 extern void __you_cannot_kmalloc_that_much(void);
67 __you_cannot_kmalloc_that_much();
70 #ifdef CONFIG_ZONE_DMA
72 cachep
= malloc_sizes
[i
].cs_dmacachep
;
75 cachep
= malloc_sizes
[i
].cs_cachep
;
77 ret
= kmem_cache_alloc_notrace(cachep
, flags
);
79 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC
, _THIS_IP_
, ret
,
80 size
, slab_buffer_size(cachep
), flags
);
84 return __kmalloc(size
, flags
);
88 extern void *__kmalloc_node(size_t size
, gfp_t flags
, int node
);
89 extern void *kmem_cache_alloc_node(struct kmem_cache
*, gfp_t flags
, int node
);
91 #ifdef CONFIG_KMEMTRACE
92 extern void *kmem_cache_alloc_node_notrace(struct kmem_cache
*cachep
,
96 static __always_inline
void *
97 kmem_cache_alloc_node_notrace(struct kmem_cache
*cachep
,
101 return kmem_cache_alloc_node(cachep
, flags
, nodeid
);
105 static __always_inline
void *kmalloc_node(size_t size
, gfp_t flags
, int node
)
107 struct kmem_cache
*cachep
;
110 if (__builtin_constant_p(size
)) {
114 return ZERO_SIZE_PTR
;
121 #include <linux/kmalloc_sizes.h>
124 extern void __you_cannot_kmalloc_that_much(void);
125 __you_cannot_kmalloc_that_much();
128 #ifdef CONFIG_ZONE_DMA
130 cachep
= malloc_sizes
[i
].cs_dmacachep
;
133 cachep
= malloc_sizes
[i
].cs_cachep
;
135 ret
= kmem_cache_alloc_node_notrace(cachep
, flags
, node
);
137 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC
, _THIS_IP_
,
138 ret
, size
, slab_buffer_size(cachep
),
143 return __kmalloc_node(size
, flags
, node
);
146 #endif /* CONFIG_NUMA */
148 #endif /* _LINUX_SLAB_DEF_H */