/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

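/*
 * Usage note (illustrative, not part of the original file): merging is
 * disabled by booting with "slab_nomerge" on the kernel command line;
 * "slub_nomerge" is accepted as a SLUB-era alias for the same switch.
 */
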
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
			size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

static void memcg_free_cache_params(struct kmem_cache *s)
{
	kfree(s->memcg_params);
}

static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
{
	int size;
	struct memcg_cache_params *new_params, *cur_params;

	BUG_ON(!is_root_cache(s));

	size = offsetof(struct memcg_cache_params, memcg_caches);
	size += num_memcgs * sizeof(void *);

	new_params = kzalloc(size, GFP_KERNEL);
	if (!new_params)
		return -ENOMEM;

	cur_params = s->memcg_params;
	memcpy(new_params->memcg_caches, cur_params->memcg_caches,
	       memcg_limited_groups_array_size * sizeof(void *));

	new_params->is_root_cache = true;

	rcu_assign_pointer(s->memcg_params, new_params);
	kfree_rcu(cur_params, rcu_head);

	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}

#else
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}

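/*
 * Worked example (illustrative, not part of the original file): on a
 * 64-bit machine, a request for 60-byte objects with default alignment
 * is rounded up to size = 64. An existing compatible 64-byte cache
 * passes every check above (64 & ~(align - 1) == 64, and
 * 64 - 64 < sizeof(void *)), so the request merges into it. A 128-byte
 * cache would be rejected at the last check because 128 - 64 >=
 * sizeof(void *), i.e. it would waste too much space per object.
 */
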
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

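/*
 * Worked example (illustrative, not part of the original file): for a
 * 24-byte object with SLAB_HWCACHE_ALIGN, align = 0 and a 64-byte cache
 * line, ralign is halved while size <= ralign / 2, so 64 -> 32 (and no
 * further, since 24 > 16), giving align = max(0, 32) = 32. The result is
 * ALIGN(32, sizeof(void *)) = 32: two objects share each cache line
 * instead of each being padded out to 64 bytes.
 */
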
static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
		     unsigned long flags, void (*ctor)(void *),
		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		s = NULL;	/* suppress uninit var warning */
		goto out_unlock;
	}

	/*
	 * Some allocators will constraint the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = do_kmem_cache_create(cache_name, size, size,
				 calculate_alignment(flags, align, size),
				 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

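/*
 * Usage sketch (illustrative, not part of the original file; the struct
 * and all names are hypothetical): a typical caller creates its cache
 * once at init, allocates and frees objects from it, and destroys it on
 * exit.
 */
struct foo {
	int a;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (f)
		kmem_cache_free(foo_cachep, f);
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}
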
#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 * @memcg_name: The name of the memory cgroup (used for naming the new cache).
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
					   struct kmem_cache *root_cache,
					   const char *memcg_name)
{
	struct kmem_cache *s = NULL;
	char *cache_name;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       memcg_cache_id(memcg), memcg_name);
	if (!cache_name)
		goto out_unlock;

	s = do_kmem_cache_create(cache_name, root_cache->object_size,
				 root_cache->size, root_cache->align,
				 root_cache->flags, root_cache->ctor,
				 memcg, root_cache);
	if (IS_ERR(s)) {
		kfree(cache_name);
		s = NULL;
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	return s;
}

static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	int rc;

	if (!s->memcg_params ||
	    !s->memcg_params->is_root_cache)
		return 0;

	mutex_unlock(&slab_mutex);
	rc = __memcg_cleanup_cache_params(s);
	mutex_lock(&slab_mutex);

	return rc;
}
#else
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	if (memcg_cleanup_cache_params(s) != 0)
		goto out_unlock;

	if (__kmem_cache_shutdown(s) != 0) {
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		goto out_unlock;
	}

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();

	memcg_free_cache_params(s);
#ifdef SLAB_SUPPORTS_SYSFS
	sysfs_slab_remove(s);
#else
	slab_kmem_cache_release(s);
#endif
	goto out;

out_unlock:
	mutex_unlock(&slab_mutex);
out:
	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/*
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];
#endif

	return kmalloc_caches[index];
}

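/*
 * Worked example (illustrative, not part of the original file): a
 * 100-byte request takes the table branch: size_index_elem(100) =
 * (100 - 1) / 8 = 12, and size_index[12] = 7, selecting the 128-byte
 * kmalloc cache. A 1000-byte request takes the fls() branch:
 * fls(999) = 10, selecting the 1024-byte cache.
 */
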
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

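/*
 * Worked example (illustrative, not part of the original file): with
 * 4 KiB pages, a 70000-byte request is too large for the kmalloc array
 * and arrives here with order = get_order(70000) = 5, i.e. one 128 KiB
 * compound page. Because of __GFP_COMP, kfree() can later recover the
 * order from the compound page head and free all the pages correctly.
 */
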
#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}

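/*
 * Example /proc/slabinfo line produced by the format strings above
 * (the numbers are illustrative only):
 *
 * kmalloc-128         1536   1792    128   32    1 : tunables    0    0    0 : slabdata     56     56      0
 */
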
static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (!is_root_cache(s))
		return 0;
	return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/*
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/*
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

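/*
 * Usage sketch (illustrative, not part of the original file; the helper
 * is hypothetical): growing a heap buffer. On success the old pointer
 * must not be used again; on failure the original buffer is untouched
 * and remains owned by the caller.
 */
static int grow_buf(char **bufp, size_t new_len)
{
	char *tmp = krealloc(*bufp, new_len, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is still valid */
	*bufp = tmp;
	return 0;
}
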
/*
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

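/*
 * Usage sketch (illustrative, not part of the original file; the helper
 * is hypothetical): kzfree() suits buffers that held secrets, so their
 * contents do not linger in freed memory. Note it zeroes ksize(key)
 * bytes, which may exceed @len.
 */
static int handle_secret(const u8 *src, size_t len)
{
	u8 *key = kmemdup(src, len, GFP_KERNEL);

	if (!key)
		return -ENOMEM;
	/* ... use the key ... */
	kzfree(key);	/* zero the whole allocation, then free it */
	return 0;
}
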
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);