mm/slab_common.c
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                                   size_t size)
{
        struct kmem_cache *s = NULL;

        if (!name || in_interrupt() || size < sizeof(void *) ||
            size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }

        list_for_each_entry(s, &slab_caches, list) {
                char tmp;
                int res;

                /*
                 * This happens when the module gets unloaded and doesn't
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module. Print a warning.
                 */
                res = probe_kernel_address(s->name, tmp);
                if (res) {
                        pr_err("Slab cache with size %d has lost its name\n",
                               s->object_size);
                        continue;
                }

                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
                 * aren't duplicates in the global list, there won't be any
                 * duplicates in the memcg lists as well.
                 */
                if (!memcg && !strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
                        dump_stack();
                        s = NULL;
                        return -EINVAL;
                }
        }

        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
        return 0;
}
#else
static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
                                          const char *name, size_t size)
{
        return 0;
}
#endif
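
/*
 * Illustrative only (this call does not appear in this file): with
 * CONFIG_DEBUG_VM enabled, the sanity check above would reject e.g.
 *
 *      kmem_cache_create("tiny", 2, 0, 0, NULL);
 *
 * because a 2-byte object is smaller than sizeof(void *), and it would
 * likewise reject a name that duplicates an existing root cache.
 */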

#ifdef CONFIG_MEMCG_KMEM
int memcg_update_all_caches(int num_memcgs)
{
        struct kmem_cache *s;
        int ret = 0;

        mutex_lock(&slab_mutex);

        list_for_each_entry(s, &slab_caches, list) {
                if (!is_root_cache(s))
                        continue;

                ret = memcg_update_cache_size(s, num_memcgs);
                /*
                 * See comment in memcontrol.c, memcg_update_cache_size:
                 * Instead of freeing the memory, we'll just leave the caches
                 * up to this point in an updated state.
                 */
                if (ret)
                        goto out;
        }

        memcg_update_array_size(num_memcgs);
out:
        mutex_unlock(&slab_mutex);
        return ret;
}
#endif
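
/*
 * Note (a reading of this version of the code, not a guarantee): the
 * memcg side in memcontrol.c is expected to call
 * memcg_update_all_caches() when the number of kmem-active memcgs
 * grows, so that every root cache's per-memcg cache array is resized
 * before the new memcg index is used.
 */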

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size)
{
        /*
         * If the user wants hardware cache aligned objects then follow that
         * suggestion if the object is sufficiently large.
         *
         * The hardware cache alignment cannot override the specified
         * alignment though. If that is greater, then use it.
         */
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned long ralign = cache_line_size();

                while (size <= ralign / 2)
                        ralign /= 2;
                align = max(align, ralign);
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}
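
/*
 * A worked example of the loop above (values are hypothetical): with
 * SLAB_HWCACHE_ALIGN set, a 64-byte cache line and 20-byte objects,
 * ralign halves from 64 to 32 (20 <= 32) and then stops (20 > 16), so
 * the objects end up 32-byte aligned: two fit per cache line and no
 * object straddles a line boundary.
 */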


/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
                        size_t align, unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s = NULL;
        int err = 0;

        get_online_cpus();
        mutex_lock(&slab_mutex);

        if (kmem_cache_sanity_check(memcg, name, size))
                goto out_locked;

        /*
         * Some allocators will constrain the set of valid flags to a subset
         * of all flags. We expect them to define CACHE_CREATE_MASK in this
         * case, and we'll just provide them with a sanitized version of the
         * passed flags.
         */
        flags &= CACHE_CREATE_MASK;

        s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
        if (s)
                goto out_locked;

        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (s) {
                s->object_size = s->size = size;
                s->align = calculate_alignment(flags, align, size);
                s->ctor = ctor;

                if (memcg_register_cache(memcg, s)) {
                        kmem_cache_free(kmem_cache, s);
                        err = -ENOMEM;
                        goto out_locked;
                }

                s->name = kstrdup(name, GFP_KERNEL);
                if (!s->name) {
                        kmem_cache_free(kmem_cache, s);
                        err = -ENOMEM;
                        goto out_locked;
                }

                err = __kmem_cache_create(s, flags);
                if (!err) {
                        s->refcount = 1;
                        list_add(&s->list, &slab_caches);
                        memcg_cache_list_add(memcg, s);
                } else {
                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                }
        } else
                err = -ENOMEM;

out_locked:
        mutex_unlock(&slab_mutex);
        put_online_cpus();

        if (err) {
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                              name, err);
                else {
                        printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
                               name, err);
                        dump_stack();
                }

                return NULL;
        }

        return s;
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
                  unsigned long flags, void (*ctor)(void *))
{
        return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
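
/*
 * A minimal usage sketch, not part of this file; "struct foo" and
 * "foo_cache" are made-up names:
 *
 *      static struct kmem_cache *foo_cache;
 *
 *      foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *                                    0, SLAB_HWCACHE_ALIGN, NULL);
 *      if (!foo_cache)
 *              return -ENOMEM;
 *
 *      struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(foo_cache, f);
 */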

void kmem_cache_destroy(struct kmem_cache *s)
{
        /* Destroy all the children caches if we aren't a memcg cache */
        kmem_cache_destroy_memcg_children(s);

        get_online_cpus();
        mutex_lock(&slab_mutex);
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);

                if (!__kmem_cache_shutdown(s)) {
                        mutex_unlock(&slab_mutex);
                        if (s->flags & SLAB_DESTROY_BY_RCU)
                                rcu_barrier();

                        memcg_release_cache(s);
                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                } else {
                        list_add(&s->list, &slab_caches);
                        mutex_unlock(&slab_mutex);
                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
                               s->name);
                        dump_stack();
                }
        } else {
                mutex_unlock(&slab_mutex);
        }
        put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
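
/*
 * The matching teardown for the sketch above (foo_cache is again a
 * made-up name). Every object must have been freed back to the cache
 * first, otherwise the "still has objects" branch above fires:
 *
 *      kmem_cache_free(foo_cache, f);
 *      kmem_cache_destroy(foo_cache);
 */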

int slab_is_available(void)
{
        return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
                unsigned long flags)
{
        int err;

        s->name = name;
        s->size = s->object_size = size;
        s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
        err = __kmem_cache_create(s, flags);

        if (err)
                panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
                      name, size, err);

        s->refcount = -1;       /* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
                                               unsigned long flags)
{
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        if (!s)
                panic("Out of memory when creating slab %s\n", name);

        create_boot_cache(s, name, size, flags);
        list_add(&s->list, &slab_caches);
        s->refcount = 1;
        return s;
}
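
/*
 * Illustrative boot-time use; the caller and the array live in the
 * individual allocators, so the names below are assumptions:
 *
 *      kmalloc_caches[idx] = create_kmalloc_cache("kmalloc-64", 64, 0);
 */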

#endif /* !CONFIG_SLOB */


#ifdef CONFIG_SLABINFO
void print_slabinfo_header(struct seq_file *m)
{
        /*
         * Output format version, so at least we can change it
         * without _too_ many complaints.
         */
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
        seq_puts(m, "slabinfo - version: 2.1\n");
#endif
        seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
                 "<objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
                 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
        seq_putc(m, '\n');
}
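
/*
 * An illustrative /proc/slabinfo entry in the 2.1 format printed above
 * (the numbers are made up: 64-byte objects, 64 objects per one-page
 * slab, 19 slabs):
 *
 *      kmalloc-64  1184  1216  64  64  1 : tunables 0 0 0 : slabdata 19 19 0
 */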

static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;

        mutex_lock(&slab_mutex);
        if (!n)
                print_slabinfo_header(m);

        return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
        struct kmem_cache *c;
        struct slabinfo sinfo;
        int i;

        if (!is_root_cache(s))
                return;

        for_each_memcg_cache_index(i) {
                c = cache_from_memcg(s, i);
                if (!c)
                        continue;

                memset(&sinfo, 0, sizeof(sinfo));
                get_slabinfo(c, &sinfo);

                info->active_slabs += sinfo.active_slabs;
                info->num_slabs += sinfo.num_slabs;
                info->shared_avail += sinfo.shared_avail;
                info->active_objs += sinfo.active_objs;
                info->num_objs += sinfo.num_objs;
        }
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
        struct slabinfo sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);

        memcg_accumulate_slabinfo(s, &sinfo);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));

        seq_printf(m, " : tunables %4u %4u %4u",
                   sinfo.limit, sinfo.batchcount, sinfo.shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
                   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
        slabinfo_show_stats(m, s);
        seq_putc(m, '\n');
        return 0;
}

static int s_show(struct seq_file *m, void *p)
{
        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

        if (!is_root_cache(s))
                return 0;
        return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
        .open           = slabinfo_open,
        .read           = seq_read,
        .write          = slabinfo_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init slab_proc_init(void)
{
        proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
        return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */