#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
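
/*
 * With CONFIG_SLUB_STATS enabled, each counter above is exported by
 * mm/slub.c as a read-only sysfs file named after the lower-cased item,
 * e.g. /sys/kernel/slab/<cache>/alloc_fastpath.
 */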

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
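
/*
 * A sketch of how freelist and tid cooperate (the real fastpath lives
 * in mm/slub.c): the allocator reads both, computes the next free
 * object, and commits the pair with one this_cpu_cmpxchg_double(),
 * roughly:
 *
 *	object = c->freelist;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *				     s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object),
 *				     next_tid(tid)))
 *		goto redo;
 *
 * The tid changes on every operation and encodes the cpu, so a task
 * that was preempted or migrated cannot commit against a stale
 * freelist.
 */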

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
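
/*
 * mm/slub.c packs the order into the high bits of x and the object
 * count into the low bits, and decodes them with small helpers along
 * these lines (a sketch; OO_SHIFT and OO_MASK are private to mm/slub.c):
 *
 *	static inline int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 */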

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};
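
/*
 * How the size fields relate (a rough sketch, not normative): each
 * object occupies 'size' bytes of the slab, the caller-visible payload
 * is the first 'object_size' bytes, 'inuse' marks where tracking
 * metadata may begin, and 'offset' is where the free pointer is kept
 * while the object sits on a freelist (0 when it can safely overlay
 * the payload; past the object when poisoning, a constructor, or RCU
 * freeing require the payload to stay intact).
 */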

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
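
/*
 * __GFP_COMP makes the allocation a compound page, so that a later
 * kfree() can get from any constituent page back to the head page and
 * tell a page-allocator-backed object apart from a slab object;
 * __GFP_KMEMCG routes the charge to the current memcg's kmem counter.
 */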

/**
 * verify_mem_not_deleted - check that allocated memory is still in use
 * @x: pointer to the memory to check
 *
 * Calling this on allocated memory will check that the memory
 * is expected to be in use, and print warnings if not.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
	return true;
}
#endif
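
/*
 * Example (a sketch): callers can use this as a debug-only guard before
 * trusting a long-lived pointer; without CONFIG_SLUB_DEBUG it always
 * returns true ('cached_ptr' is a placeholder for whatever pointer the
 * caller holds):
 *
 *	WARN_ON(!verify_mem_not_deleted(cached_ptr));
 */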

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
	}
	return __kmalloc(size, flags);
}
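
/*
 * Example (a sketch): with a compile-time-constant size the call above
 * folds into a direct kmem_cache_alloc_trace() on the matching
 * kmalloc_caches[] entry, while a runtime size always takes __kmalloc()
 * ('struct foo', 'len' and the error handling are placeholders):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);	// constant
 *	char *buf = kmalloc(len, GFP_KERNEL);			// runtime
 *
 *	if (!f || !buf)
 *		goto out_nomem;
 *	...
 *	kfree(buf);
 *	kfree(f);
 */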

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
			       flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
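
/*
 * Example (a sketch): pinning an allocation to the current cpu's node;
 * 'struct foo' is a placeholder, while cpu_to_node()/numa_node_id() are
 * the standard topology helpers:
 *
 *	void *data = kmalloc_node(sizeof(struct foo), GFP_KERNEL,
 *				  cpu_to_node(smp_processor_id()));
 */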
#endif

#endif /* _LINUX_SLUB_DEF_H */