Rename percpu-alloc files to mempool
[librseq.git] / include / rseq / mempool.h
1 /* SPDX-License-Identifier: MIT */
2 /* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
3
4 #ifndef _RSEQ_MEMPOOL_H
5 #define _RSEQ_MEMPOOL_H
6
7 #include <stddef.h>
8 #include <sys/types.h>
9 #include <sys/mman.h>
10
11 /*
12 * rseq/mempool.h: rseq CPU-Local Storage (CLS) memory allocator.
13 *
14 * The rseq per-CPU memory allocator allows the application to request
15 * memory pools of CPU-Local memory, each containing objects of a
16 * given size (rounded to next power of 2), reserving a given virtual
17 * address size per CPU, for a given maximum number of CPUs.
18 *
19 * The per-CPU memory allocator is analogous to TLS (Thread-Local
20 * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU
21 * memory allocator provides CPU-Local Storage.
22 */
23
24 #ifdef __cplusplus
25 extern "C" {
26 #endif
27
28 /*
29 * Tag pointers returned by:
30 * - rseq_percpu_malloc(),
31 * - rseq_percpu_zmalloc(),
32 * - rseq_percpu_pool_set_malloc(),
33 * - rseq_percpu_pool_set_zmalloc().
34 *
35 * and passed as parameter to:
36 * - rseq_percpu_ptr(),
37 * - rseq_percpu_free().
38 *
39 * with __rseq_percpu, a marker for use by static analyzers which
40 * expands to nothing at compile time.
40 */
41 #define __rseq_percpu
42
43 struct rseq_pool_attr;
44 struct rseq_percpu_pool;
45
46 /*
47 * rseq_percpu_pool_create: Create a per-cpu memory pool.
48 *
49 * Create a per-cpu memory pool for items of size @item_len (rounded to
50 * next power of two). The reserved allocation size is @percpu_len, and
51 * the maximum CPU value expected is (@max_nr_cpus - 1).
52 *
53 * The @attr pointer is used to specify the pool attributes. If NULL,
54 * default attribute values are used. The @attr can be destroyed immediately
55 * after rseq_percpu_pool_create() returns. The caller keeps ownership
56 * of @attr.
57 *
58 * The argument @pool_name can be used to give a name to the pool for
59 * debugging purposes. It can be NULL if no name is given.
60 *
61 * Returns a pointer to the created percpu pool. Return NULL on error,
62 * with errno set accordingly:
63 * EINVAL: Invalid argument.
64 * ENOMEM: Not enough resources (memory or pool indexes) available to
65 * allocate pool.
66 *
67 * In addition, if the attr mmap callback fails, NULL is returned and
68 * errno is propagated from the callback. The default callback can
69 * return errno=ENOMEM.
70 *
71 * This API is MT-safe.
72 */
73 struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
74 size_t item_len, size_t percpu_len, int max_nr_cpus,
75 const struct rseq_pool_attr *attr);
76
77 /*
78 * rseq_percpu_pool_destroy: Destroy a per-cpu memory pool.
79 *
80 * Destroy a per-cpu memory pool, unmapping its memory and removing the
81 * pool entry from the global index. No pointers allocated from the
82 * pool should be used when it is destroyed. This includes rseq_percpu_ptr().
83 *
84 * Argument @pool is a pointer to the per-cpu pool to destroy.
85 *
86 * Return values: 0 on success, -1 on error, with errno set accordingly:
87 * ENOENT: Trying to free a pool which was not allocated.
88 *
89 * If the munmap_func callback fails, -1 is returned and errno is
90 * propagated from the callback. The default callback can return
91 * errno=EINVAL.
92 *
93 * This API is MT-safe.
94 */
95 int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool);
96
97 /*
98 * rseq_percpu_malloc: Allocate memory from a per-cpu pool.
99 *
100 * Allocate an item from a per-cpu @pool. The allocation will reserve
101 * an item of the size specified by @item_len (rounded to next power of
102 * two) at pool creation. This effectively reserves space for this item
103 * on all CPUs.
104 *
105 * On success, return a "__rseq_percpu" encoded pointer to the pool
106 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
107 * to be decoded to a valid address before being accessed.
108 *
109 * Return NULL (errno=ENOMEM) if there is not enough space left in the
110 * pool to allocate an item.
111 *
112 * This API is MT-safe.
113 */
114 void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool);
115
116 /*
117 * rseq_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
118 *
119 * Allocate memory for an item within the pool, and zero-initialize its
120 * memory on all CPUs. See rseq_percpu_malloc for details.
121 *
122 * This API is MT-safe.
123 */
124 void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool);
125
126 /*
127 * rseq_percpu_free: Free memory from a per-cpu pool.
128 *
129 * Free an item pointed to by @ptr from its per-cpu pool.
130 *
131 * The @ptr argument is a __rseq_percpu encoded pointer returned by
132 * either:
133 *
134 * - rseq_percpu_malloc(),
135 * - rseq_percpu_zmalloc(),
136 * - rseq_percpu_pool_set_malloc(),
137 * - rseq_percpu_pool_set_zmalloc().
138 *
139 * This API is MT-safe.
140 */
141 void rseq_percpu_free(void __rseq_percpu *ptr);
142
143 /*
144 * rseq_percpu_ptr: Decode a per-cpu pointer.
145 *
146 * Decode a per-cpu pointer @ptr to get the associated pointer for the
147 * given @cpu. The @ptr argument is a __rseq_percpu encoded pointer
148 * returned by either:
149 *
150 * - rseq_percpu_malloc(),
151 * - rseq_percpu_zmalloc(),
152 * - rseq_percpu_pool_set_malloc(),
153 * - rseq_percpu_pool_set_zmalloc().
154 *
155 * The __rseq_percpu pointer can be decoded with rseq_percpu_ptr() even
156 * after it has been freed, as long as its associated pool has not been
157 * destroyed. However, memory pointed to by the decoded pointer should
158 * not be accessed after the __rseq_percpu pointer has been freed.
159 *
160 * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
161 * for the returned pointer, but removes the __rseq_percpu annotation.
162 *
163 * This API is MT-safe.
164 */
165 void *__rseq_percpu_ptr(void __rseq_percpu *ptr, int cpu);
166 #define rseq_percpu_ptr(ptr, cpu) ((__typeof__(*(ptr)) *) __rseq_percpu_ptr(ptr, cpu)) /* cast gives the decoded pointer @ptr's pointee type */
167
168 /*
169 * rseq_percpu_pool_set_create: Create a pool set.
170 *
171 * Create a set of pools. Its purpose is to offer a memory allocator API
172 * for variable-length items (e.g. variable length strings). When
173 * created, the pool set has no pool. Pools can be created and added to
174 * the set. One common approach would be to create pools for each
175 * relevant power of two allocation size useful for the application.
176 * Only one pool can be added to the pool set for each power of two
177 * allocation size.
178 *
179 * Returns a pool set pointer on success, else returns NULL with
180 * errno=ENOMEM (out of memory).
181 *
182 * This API is MT-safe.
183 */
184 struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void);
185
186 /*
187 * rseq_percpu_pool_set_destroy: Destroy a pool set.
188 *
189 * Destroy a pool set and its associated resources. The pools that were
190 * added to the pool set are destroyed as well.
191 *
192 * Returns 0 on success, -1 on failure (or partial failure), with errno
193 * set by rseq_percpu_pool_destroy(). Using a pool set after destroy
194 * failure is undefined.
195 *
196 * This API is MT-safe.
197 */
198 int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set);
199
200 /*
201 * rseq_percpu_pool_set_add_pool: Add a pool to a pool set.
202 *
203 * Add a @pool to the @pool_set. On success, its ownership is handed
204 * over to the pool set, so the caller should not destroy it explicitly.
205 * Only one pool can be added to the pool set for each power of two
206 * allocation size.
207 *
208 * Returns 0 on success, -1 on error with the following errno:
209 * - EBUSY: A pool already exists in the pool set for this power of two
210 * allocation size.
211 *
212 * This API is MT-safe.
213 */
214 int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set,
215 struct rseq_percpu_pool *pool);
216
217 /*
218 * rseq_percpu_pool_set_malloc: Allocate memory from a per-cpu pool set.
219 *
220 * Allocate an item from the per-cpu pool set @pool_set. The allocation will reserve
221 * an item of the size specified by @len (rounded to next power of
222 * two). This effectively reserves space for this item on all CPUs.
223 *
224 * The space reservation will search for the smallest pool within
225 * @pool_set which respects the following conditions:
226 *
227 * - it has an item size large enough to fit @len,
228 * - it has space available.
229 *
230 * On success, return a "__rseq_percpu" encoded pointer to the pool
231 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
232 * to be decoded to a valid address before being accessed.
233 *
234 * Return NULL (errno=ENOMEM) if there is not enough space left in the
235 * pool to allocate an item.
236 *
237 * This API is MT-safe.
238 */
239 void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len);
240
241 /*
242 * rseq_percpu_pool_set_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
243 *
244 * Allocate memory for an item within the pool, and zero-initialize its
245 * memory on all CPUs. See rseq_percpu_pool_set_malloc for details.
246 *
247 * This API is MT-safe.
248 */
249 void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len);
250
251 /*
252 * rseq_percpu_pool_init_numa: Move pages to the NUMA node associated to their CPU topology.
253 *
254 * For pages allocated within @pool, invoke move_pages(2) with the given
255 * @numa_flags to move the pages to the NUMA node associated to their
256 * CPU topology.
257 *
258 * Argument @numa_flags are passed to move_pages(2). The expected flags are:
259 * MPOL_MF_MOVE: move process-private pages to cpu-specific numa nodes.
260 * MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes
261 * (requires CAP_SYS_NICE).
262 *
263 * Returns 0 on success, else returns -1 with errno set by move_pages(2).
264 */
265 int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags);
266
267 /*
268 * rseq_pool_attr_create: Create a pool attribute structure.
269 */
270 struct rseq_pool_attr *rseq_pool_attr_create(void);
271
272 /*
273 * rseq_pool_attr_destroy: Destroy a pool attribute structure.
274 */
275 void rseq_pool_attr_destroy(struct rseq_pool_attr *attr);
276
277 /*
278 * rseq_pool_attr_set_mmap: Set pool attribute structure mmap functions.
279 *
280 * The @mmap_func callback used to map the memory for the pool.
281 *
282 * The @munmap_func callback used to unmap the memory when the pool
283 * is destroyed.
284 *
285 * The @mmap_priv argument is a private data pointer passed to both
286 * @mmap_func and @munmap_func callbacks.
287 *
288 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
289 */
290 int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
291 void *(*mmap_func)(void *priv, size_t len),
292 int (*munmap_func)(void *priv, void *ptr, size_t len),
293 void *mmap_priv);
294
295 /*
296 * rseq_pool_attr_set_robust: Set pool robust attribute.
297 *
298 * The robust pool attribute enables runtime validation of the pool:
299 *
300 * - Check for double-free of pointers.
301 *
302 * - Detect memory leaks on pool destruction.
303 *
304 * - Detect free-list corruption on pool destruction.
305 *
306 * There is a marginal runtime overhead on malloc/free operations.
307 *
308 * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT
309 * bytes, over the lifetime of the pool.
310 *
311 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
312 */
313 int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr);
314
315 #ifdef __cplusplus
316 }
317 #endif
318
319 #endif /* _RSEQ_MEMPOOL_H */
This page took 0.036715 seconds and 5 git commands to generate.