Commit | Line | Data |
---|---|---|
2e892f43 CL |
1 | #ifndef _LINUX_SLAB_DEF_H |
2 | #define _LINUX_SLAB_DEF_H | |
3 | ||
4 | /* | |
5 | * Definitions unique to the original Linux SLAB allocator. | |
6 | * | |
7 | * What we provide here is a way to optimize the frequent kmalloc | |
8 | * calls in the kernel by selecting the appropriate general cache | |
9 | * if kmalloc was called with a size that can be established at | |
10 | * compile time. | |
11 | */ | |
12 | ||
13 | #include <linux/init.h> | |
14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | |
15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | |
16 | #include <linux/compiler.h> | |
17 | ||
18 | /* Size description struct for general caches. */ | |
19 | struct cache_sizes { | |
20 | size_t cs_size; | |
21 | struct kmem_cache *cs_cachep; | |
22 | struct kmem_cache *cs_dmacachep; | |
23 | }; | |
24 | extern struct cache_sizes malloc_sizes[]; | |
25 | ||
/*
 * kmalloc - allocate memory
 * @size: number of bytes required
 * @flags: the type of memory to allocate (gfp mask)
 *
 * When @size is a compile-time constant, the CACHE() macro below is
 * expanded once per size listed in kmalloc_sizes.h, turning the cache
 * lookup into a chain of constant comparisons the compiler folds away:
 * @i ends up as the index of the first general cache large enough.
 * Non-constant sizes fall through to the out-of-line __kmalloc().
 */
static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/*
			 * Deliberately has no definition anywhere: if @size
			 * exceeds every cache, the call survives to link
			 * time and produces an undefined-symbol error.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		/* GFP_DMA callers get the DMA-capable twin of cache @i. */
		return kmem_cache_alloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}
48 | ||
/*
 * kzalloc - allocate zeroed memory
 * @size: number of bytes required
 * @flags: the type of memory to allocate (gfp mask)
 *
 * Identical structure to kmalloc() above: a compile-time-constant @size
 * is resolved to a general-cache index @i by expanding CACHE() for each
 * entry of kmalloc_sizes.h; the allocation then goes through
 * kmem_cache_zalloc() so the returned memory is zero-filled.
 * Non-constant sizes use the out-of-line __kzalloc().
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/*
			 * Never defined: triggers a link-time error when
			 * @size is larger than the biggest general cache.
			 */
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
		/* GFP_DMA callers get the DMA-capable twin of cache @i. */
		return kmem_cache_zalloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}
71 | ||
72 | #ifdef CONFIG_NUMA | |
73 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | |
74 | ||
/*
 * kmalloc_node - allocate memory from a specific NUMA node
 * @size: number of bytes required
 * @flags: the type of memory to allocate (gfp mask)
 * @node: NUMA node to allocate from
 *
 * NUMA-aware counterpart of kmalloc() above, using the same compile-time
 * CACHE()/kmalloc_sizes.h expansion to pick general cache index @i, then
 * allocating via kmem_cache_alloc_node() on the requested node.
 * Non-constant sizes fall through to the out-of-line __kmalloc_node().
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/*
			 * Never defined: link-time error when @size exceeds
			 * every general cache.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		/* GFP_DMA callers get the DMA-capable twin of cache @i. */
		return kmem_cache_alloc_node((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
97 | ||
98 | #endif /* CONFIG_NUMA */ | |
99 | ||
100 | #endif /* _LINUX_SLAB_DEF_H */ |