Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
2e892f43 CL |
2 | * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). |
3 | * | |
4 | * (C) SGI 2006, Christoph Lameter <clameter@sgi.com> | |
5 | * Cleaned up and restructured to ease the addition of alternative | |
6 | * implementations of SLAB allocators. | |
1da177e4 LT |
7 | */ |
8 | ||
9 | #ifndef _LINUX_SLAB_H | |
10 | #define _LINUX_SLAB_H | |
11 | ||
1b1cec4b | 12 | #ifdef __KERNEL__ |
1da177e4 | 13 | |
1b1cec4b | 14 | #include <linux/gfp.h> |
1b1cec4b | 15 | #include <linux/types.h> |
1da177e4 | 16 | |
1b1cec4b | 17 | typedef struct kmem_cache kmem_cache_t __deprecated; |
1da177e4 | 18 | |
2e892f43 CL |
19 | /* |
20 | * Flags to pass to kmem_cache_create(). | |
21 | * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. | |
1da177e4 | 22 | */ |
55935a34 CL |
23 | #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ |
24 | #define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */ | |
25 | #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ | |
26 | #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ | |
27 | #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ | |
2e892f43 CL |
28 | #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ |
29 | #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debugging is active */ | |
30 | #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ | |
31 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ | |
32 | #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ | |
33 | #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ | |
101a5001 | 34 | #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ |
1da177e4 | 35 | |
2e892f43 | 36 | /* Flags passed to a constructor functions */ |
55935a34 | 37 | #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then destructor */
2e892f43 | 38 | #define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */ |
55935a34 | 39 | #define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */ |
1da177e4 | 40 | |
2e892f43 CL |
41 | /* |
42 | * struct kmem_cache related prototypes | |
43 | */ | |
44 | void __init kmem_cache_init(void); | |
45 | extern int slab_is_available(void); | |
1da177e4 | 46 | |
2e892f43 | 47 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, |
ebe29738 CL |
48 | unsigned long, |
49 | void (*)(void *, struct kmem_cache *, unsigned long), | |
50 | void (*)(void *, struct kmem_cache *, unsigned long)); | |
2e892f43 CL |
51 | void kmem_cache_destroy(struct kmem_cache *); |
52 | int kmem_cache_shrink(struct kmem_cache *); | |
53 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | |
54 | void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); | |
55 | void kmem_cache_free(struct kmem_cache *, void *); | |
56 | unsigned int kmem_cache_size(struct kmem_cache *); | |
57 | const char *kmem_cache_name(struct kmem_cache *); | |
55935a34 | 58 | int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); |
2e892f43 CL |
59 | |
60 | #ifdef CONFIG_NUMA | |
61 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | |
62 | #else | |
63 | static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |
64 | gfp_t flags, int node) | |
65 | { | |
66 | return kmem_cache_alloc(cachep, flags); | |
67 | } | |
68 | #endif | |
69 | ||
70 | /* | |
71 | * Common kmalloc functions provided by all allocators | |
72 | */ | |
73 | void *__kmalloc(size_t, gfp_t); | |
74 | void *__kzalloc(size_t, gfp_t); | |
fd76bab2 | 75 | void * __must_check krealloc(const void *, size_t, gfp_t); |
2e892f43 | 76 | void kfree(const void *); |
fd76bab2 | 77 | size_t ksize(const void *); |
2e892f43 CL |
78 | |
79 | /** | |
80 | * kcalloc - allocate memory for an array. The memory is set to zero. | |
81 | * @n: number of elements. | |
82 | * @size: element size. | |
83 | * @flags: the type of memory to allocate. | |
84 | */ | |
85 | static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | |
86 | { | |
87 | if (n != 0 && size > ULONG_MAX / n) | |
88 | return NULL; | |
89 | return __kzalloc(n * size, flags); | |
90 | } | |
1da177e4 | 91 | |
2e892f43 CL |
92 | /* |
93 | * Allocator specific definitions. These are mainly used to establish optimized | |
94 | * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting | |
95 | * the appropriate general cache at compile time. | |
96 | */ | |
55935a34 | 97 | |
2e892f43 CL |
98 | #ifdef CONFIG_SLAB |
99 | #include <linux/slab_def.h> | |
100 | #else | |
2e892f43 CL |
101 | /* |
102 | * Fallback definitions for an allocator not wanting to provide | |
103 | * its own optimized kmalloc definitions (like SLOB). | |
104 | */ | |
105 | ||
800590f5 PD |
106 | /** |
107 | * kmalloc - allocate memory | |
108 | * @size: how many bytes of memory are required. | |
109 | * @flags: the type of memory to allocate. | |
110 | * | |
111 | * kmalloc is the normal method of allocating memory | |
112 | * in the kernel. | |
113 | * | |
114 | * The @flags argument may be one of: | |
115 | * | |
116 | * %GFP_USER - Allocate memory on behalf of user. May sleep. | |
117 | * | |
118 | * %GFP_KERNEL - Allocate normal kernel ram. May sleep. | |
119 | * | |
120 | * %GFP_ATOMIC - Allocation will not sleep. | |
121 | * For example, use this inside interrupt handlers. | |
122 | * | |
123 | * %GFP_HIGHUSER - Allocate pages from high memory. | |
124 | * | |
125 | * %GFP_NOIO - Do not do any I/O at all while trying to get memory. | |
126 | * | |
127 | * %GFP_NOFS - Do not make any fs calls while trying to get memory. | |
128 | * | |
129 | * Also it is possible to set different flags by OR'ing | |
130 | * in one or more of the following additional @flags: | |
131 | * | |
132 | * %__GFP_COLD - Request cache-cold pages instead of | |
133 | * trying to return cache-warm pages. | |
134 | * | |
135 | * %__GFP_DMA - Request memory from the DMA-capable zone. | |
136 | * | |
137 | * %__GFP_HIGH - This allocation has high priority and may use emergency pools. | |
138 | * | |
139 | * %__GFP_HIGHMEM - Allocated memory may be from highmem. | |
140 | * | |
141 | * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail | |
142 | * (think twice before using). | |
143 | * | |
144 | * %__GFP_NORETRY - If memory is not immediately available, | |
145 | * then give up at once. | |
146 | * | |
147 | * %__GFP_NOWARN - If allocation fails, don't issue any warnings. | |
148 | * | |
149 | * %__GFP_REPEAT - If allocation fails initially, try once more before failing. | |
150 | */ | |
55935a34 | 151 | static inline void *kmalloc(size_t size, gfp_t flags) |
1da177e4 | 152 | { |
1da177e4 LT |
153 | return __kmalloc(size, flags); |
154 | } | |
155 | ||
2e892f43 CL |
156 | /** |
157 | * kzalloc - allocate memory. The memory is set to zero. | |
158 | * @size: how many bytes of memory are required. | |
159 | * @flags: the type of memory to allocate (see kmalloc). | |
160 | */ | |
55935a34 | 161 | static inline void *kzalloc(size_t size, gfp_t flags) |
2e892f43 CL |
162 | { |
163 | return __kzalloc(size, flags); | |
164 | } | |
165 | #endif | |
166 | ||
55935a34 CL |
167 | #ifndef CONFIG_NUMA |
168 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |
169 | { | |
170 | return kmalloc(size, flags); | |
171 | } | |
172 | ||
173 | static inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | |
174 | { | |
175 | return __kmalloc(size, flags); | |
176 | } | |
177 | #endif /* !CONFIG_NUMA */ | |
178 | ||
1d2c8eea CH |
179 | /* |
180 | * kmalloc_track_caller is a special version of kmalloc that records the | |
181 | * calling function of the routine calling it for slab leak tracking instead | |
182 | * of just the calling function (confusing, eh?). | |
183 | * It's useful when the call to kmalloc comes from a widely-used standard | |
184 | * allocator where we care about the real place the memory allocation | |
185 | * request comes from. | |
186 | */ | |
2e892f43 | 187 | #ifdef CONFIG_DEBUG_SLAB |
1d2c8eea CH |
188 | extern void *__kmalloc_track_caller(size_t, gfp_t, void*); |
189 | #define kmalloc_track_caller(size, flags) \ | |
190 | __kmalloc_track_caller(size, flags, __builtin_return_address(0)) | |
2e892f43 CL |
191 | #else |
192 | #define kmalloc_track_caller(size, flags) \ | |
193 | __kmalloc(size, flags) | |
194 | #endif /* DEBUG_SLAB */ | |
1da177e4 | 195 | |
97e2bde4 | 196 | #ifdef CONFIG_NUMA |
8b98c169 CH |
197 | /* |
198 | * kmalloc_node_track_caller is a special version of kmalloc_node that | |
199 | * records the calling function of the routine calling it for slab leak | |
200 | * tracking instead of just the calling function (confusing, eh?). | |
201 | * It's useful when the call to kmalloc_node comes from a widely-used | |
202 | * standard allocator where we care about the real place the memory | |
203 | * allocation request comes from. | |
204 | */ | |
2e892f43 | 205 | #ifdef CONFIG_DEBUG_SLAB |
8b98c169 CH |
206 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); |
207 | #define kmalloc_node_track_caller(size, flags, node) \ | |
208 | __kmalloc_node_track_caller(size, flags, node, \ | |
209 | __builtin_return_address(0)) | |
2e892f43 CL |
210 | #else |
211 | #define kmalloc_node_track_caller(size, flags, node) \ | |
212 | __kmalloc_node(size, flags, node) | |
8b98c169 | 213 | #endif |
2e892f43 | 214 | |
8b98c169 | 215 | #else /* CONFIG_NUMA */ |
8b98c169 CH |
216 | |
217 | #define kmalloc_node_track_caller(size, flags, node) \ | |
218 | kmalloc_track_caller(size, flags) | |
97e2bde4 | 219 | |
55935a34 | 220 | #endif /* CONFIG_NUMA */
10cef602 | 221 | |
ac267728 AB |
222 | extern const struct seq_operations slabinfo_op; |
223 | ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); | |
224 | ||
1da177e4 | 225 | #endif /* __KERNEL__ */ |
1da177e4 | 226 | #endif /* _LINUX_SLAB_H */ |
2e892f43 | 227 |