Commit | Line | Data |
---|---|---|
ef6695f1 MD |
1 | /* SPDX-License-Identifier: MIT */ |
2 | /* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */ | |
3 | ||
34337fec MD |
4 | #ifndef _RSEQ_MEMPOOL_H |
5 | #define _RSEQ_MEMPOOL_H | |
ef6695f1 | 6 | |
f2981623 | 7 | #include <rseq/compiler.h> |
ef6695f1 MD |
8 | #include <stddef.h> |
9 | #include <sys/types.h> | |
e229a2dd | 10 | #include <sys/mman.h> |
ef6695f1 MD |
11 | |
12 | /* | |
89b7e681 MD |
13 | * rseq/mempool.h: rseq memory pool allocator. |
14 | * | |
15 | * The rseq memory pool allocator can be configured as either a global | |
16 | * allocator (default) or a per-CPU memory allocator. | |
17 | * | |
18 | * The rseq global memory allocator allows the application to request | |
19 | * memory pools of global memory, each containing objects of a |
20 | * given size (rounded to next power of 2), reserving a given virtual | |
21 | * address size of the requested stride. | |
8aa1462d MD |
22 | * |
23 | * The rseq per-CPU memory allocator allows the application to request |
24 | * memory pools of CPU-Local memory, each containing objects of a |
25 | * given size (rounded to next power of 2), reserving a given virtual | |
26 | * address size per CPU, for a given maximum number of CPUs. | |
27 | * | |
28 | * The per-CPU memory allocator is analogous to TLS (Thread-Local | |
29 | * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU | |
30 | * memory allocator provides CPU-Local Storage. | |
89b7e681 MD |
31 | * |
32 | * Memory pool sets can be created by adding one or more pools into | |
33 | * them. They can be used to perform allocation of variable length | |
34 | * objects. | |
ef6695f1 MD |
35 | */ |
36 | ||
c7ec94e0 MD |
37 | #ifdef __cplusplus |
38 | extern "C" { | |
39 | #endif | |
40 | ||
bef24483 MD |
41 | /* |
42 | * The percpu offset stride can be overridden by the user code. | |
43 | * The stride *must* match for all objects belonging to a given pool | |
44 | * between arguments to: | |
45 | * | |
06e0b1c0 MD |
46 | * - rseq_mempool_create(), |
47 | * - rseq_percpu_ptr(). | |
e30d5eb8 | 48 | * - rseq_mempool_percpu_free(), |
bef24483 | 49 | */ |
cb475906 | 50 | #define RSEQ_MEMPOOL_STRIDE (1U << 16) /* stride: 64kB */ |
f2981623 | 51 | |
d24ee051 MD |
52 | /* |
53 | * Tag pointers returned by: | |
e30d5eb8 MD |
54 | * - rseq_mempool_percpu_malloc(), |
55 | * - rseq_mempool_percpu_zmalloc(), | |
56 | * - rseq_mempool_set_percpu_malloc(), | |
57 | * - rseq_mempool_set_percpu_zmalloc(). | |
d24ee051 | 58 | * |
8aa1462d MD |
59 | * and passed as parameter to: |
60 | * - rseq_percpu_ptr(), | |
e30d5eb8 | 61 | * - rseq_mempool_percpu_free(). |
8aa1462d | 62 | * |
d24ee051 MD |
63 | * with __rseq_percpu for use by static analyzers. |
64 | */ | |
65 | #define __rseq_percpu | |
66 | ||
0ba2a93e MD |
67 | struct rseq_mempool_attr; |
68 | struct rseq_mempool; | |
ef6695f1 | 69 | |
8aa1462d | 70 | /* |
e30d5eb8 | 71 | * rseq_mempool_create: Create a memory pool. |
8aa1462d | 72 | * |
cb475906 MD |
73 | * Create a memory pool for items of size @item_len (rounded to |
74 | * next power of two). | |
8aa1462d | 75 | * |
d6acc8aa MD |
76 | * The @attr pointer is used to specify the pool attributes. If NULL, |
77 | * default attribute values are used. The @attr can be destroyed immediately |
e30d5eb8 | 78 | * after rseq_mempool_create() returns. The caller keeps ownership |
89b7e681 | 79 | * of @attr. Default attributes select a global mempool type. |
8aa1462d | 80 | * |
ca452fee MD |
81 | * The argument @pool_name can be used to give a name to the pool for |
82 | * debugging purposes. It can be NULL if no name is given. | |
83 | * | |
8aa1462d MD |
84 | * Returns a pointer to the created mempool. Returns NULL on error, |
85 | * with errno set accordingly: | |
cb475906 | 86 | * |
8aa1462d MD |
87 | * EINVAL: Invalid argument. |
88 | * ENOMEM: Not enough resources (memory or pool indexes) available to | |
89 | * allocate pool. | |
90 | * | |
a82006d0 MD |
91 | * In addition, if the attr mmap callback fails, NULL is returned and |
92 | * errno is propagated from the callback. The default callback can | |
9bd07c29 | 93 | * return errno=ENOMEM. |
8aa1462d MD |
94 | * |
95 | * This API is MT-safe. | |
96 | */ | |
0ba2a93e | 97 | struct rseq_mempool *rseq_mempool_create(const char *pool_name, |
cb475906 | 98 | size_t item_len, const struct rseq_mempool_attr *attr); |
8aa1462d MD |
99 | |
100 | /* | |
0ba2a93e | 101 | * rseq_mempool_destroy: Destroy a per-cpu memory pool. |
8aa1462d MD |
102 | * |
103 | * Destroy a per-cpu memory pool, unmapping its memory and removing the | |
104 | * pool entry from the global index. No pointers allocated from the | |
105 | * pool should be used when it is destroyed. This includes rseq_percpu_ptr(). | |
106 | * | |
107 | * Argument @pool is a pointer to the per-cpu pool to destroy. | |
108 | * | |
109 | * Return values: 0 on success, -1 on error, with errno set accordingly: | |
89b7e681 | 110 | * |
8aa1462d MD |
111 | * ENOENT: Trying to free a pool which was not allocated. |
112 | * | |
9bd07c29 MD |
113 | * If the munmap_func callback fails, -1 is returned and errno is |
114 | * propagated from the callback. The default callback can return | |
115 | * errno=EINVAL. | |
8aa1462d MD |
116 | * |
117 | * This API is MT-safe. | |
118 | */ | |
0ba2a93e | 119 | int rseq_mempool_destroy(struct rseq_mempool *pool); |
ef6695f1 | 120 | |
8aa1462d | 121 | /* |
15da5c27 | 122 | * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool. |
8aa1462d MD |
123 | * |
124 | * Allocate an item from a per-cpu @pool. The allocation will reserve | |
125 | * an item of the size specified by @item_len (rounded to next power of | |
126 | * two) at pool creation. This effectively reserves space for this item | |
127 | * on all CPUs. | |
128 | * | |
129 | * On success, return a "__rseq_percpu" encoded pointer to the pool | |
130 | * item. This encoded pointer is meant to be passed to rseq_percpu_ptr() | |
131 | * to be decoded to a valid address before being accessed. | |
132 | * | |
133 | * Return NULL (errno=ENOMEM) if there is not enough space left in the | |
134 | * pool to allocate an item. | |
135 | * | |
136 | * This API is MT-safe. | |
137 | */ | |
15da5c27 | 138 | void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool); |
8aa1462d MD |
139 | |
140 | /* | |
15da5c27 | 141 | * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
8aa1462d MD |
142 | * |
143 | * Allocate memory for an item within the pool, and zero-initialize its | |
15da5c27 | 144 | * memory on all CPUs. See rseq_mempool_percpu_malloc for details. |
8aa1462d MD |
145 | * |
146 | * This API is MT-safe. | |
147 | */ | |
15da5c27 | 148 | void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool); |
8aa1462d MD |
149 | |
150 | /* | |
15da5c27 MD |
151 | * rseq_mempool_malloc: Allocate memory from a global pool. |
152 | * | |
153 | * Wrapper to allocate memory from a global pool, which can be | |
154 | * used directly without per-cpu indexing. Would normally be used | |
155 | * with pools created with max_nr_cpus=1. | |
156 | */ | |
157 | static inline | |
158 | void *rseq_mempool_malloc(struct rseq_mempool *pool) | |
159 | { | |
160 | return (void *) rseq_mempool_percpu_malloc(pool); | |
161 | } | |
162 | ||
163 | /* | |
164 | * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool. | |
165 | * | |
166 | * Wrapper to allocate memory from a global pool, which can be | |
167 | * used directly without per-cpu indexing. Would normally be used | |
168 | * with pools created with max_nr_cpus=1. | |
169 | */ | |
170 | static inline | |
171 | void *rseq_mempool_zmalloc(struct rseq_mempool *pool) | |
172 | { | |
173 | return (void *) rseq_mempool_percpu_zmalloc(pool); | |
174 | } | |
175 | ||
176 | /* | |
177 | * rseq_mempool_percpu_free: Free memory from a per-cpu pool. | |
8aa1462d MD |
178 | * |
179 | * Free an item pointed to by @ptr from its per-cpu pool. | |
180 | * | |
181 | * The @ptr argument is a __rseq_percpu encoded pointer returned by | |
182 | * either: | |
183 | * | |
15da5c27 MD |
184 | * - rseq_mempool_percpu_malloc(), |
185 | * - rseq_mempool_percpu_zmalloc(), | |
186 | * - rseq_mempool_set_percpu_malloc(), | |
187 | * - rseq_mempool_set_percpu_zmalloc(). | |
8aa1462d | 188 | * |
06e0b1c0 | 189 | * The @stride optional argument to rseq_mempool_percpu_free() is a configurable
4aa3220c | 190 | * stride, which must match the stride received by pool creation. |
cb475906 | 191 | * If the argument is not present, use the default RSEQ_MEMPOOL_STRIDE. |
4aa3220c | 192 | * |
8aa1462d MD |
193 | * This API is MT-safe. |
194 | */ | |
cb475906 | 195 | void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t stride); |
15da5c27 MD |
196 | |
197 | #define rseq_mempool_percpu_free(_ptr, _stride...) \ | |
cb475906 | 198 | librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE)) |
f2981623 | 199 | |
15da5c27 MD |
200 | /* |
201 | * rseq_mempool_free: Free memory from a global pool. |
202 | * | |
203 | * Free an item pointed to by @ptr from its global pool. Would normally | |
204 | * be used with pools created with max_nr_cpus=1. | |
205 | * | |
206 | * The @ptr argument is a pointer returned by either: | |
207 | * | |
208 | * - rseq_mempool_malloc(), | |
209 | * - rseq_mempool_zmalloc(), | |
210 | * - rseq_mempool_set_malloc(), | |
211 | * - rseq_mempool_set_zmalloc(). | |
212 | * | |
213 | * The @stride optional argument to rseq_mempool_free() is a configurable |
214 | * stride, which must match the stride received by pool creation. If | |
cb475906 | 215 | * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE. |
15da5c27 MD |
216 | * The stride is needed even for a global pool to know the mapping |
217 | * address range. | |
218 | * | |
219 | * This API is MT-safe. | |
220 | */ | |
221 | #define rseq_mempool_free(_ptr, _stride...) \ | |
cb475906 | 222 | librseq_mempool_percpu_free((void __rseq_percpu *) _ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))
ef6695f1 | 223 | |
8aa1462d | 224 | /* |
4aa3220c | 225 | * rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU. |
8aa1462d | 226 | * |
4aa3220c MD |
227 | * Offset a per-cpu pointer @ptr to get the associated pointer for the |
228 | * given @cpu. The @ptr argument is a __rseq_percpu pointer returned by | |
229 | * either: | |
8aa1462d | 230 | * |
15da5c27 MD |
231 | * - rseq_mempool_percpu_malloc(), |
232 | * - rseq_mempool_percpu_zmalloc(), | |
233 | * - rseq_mempool_set_percpu_malloc(), | |
234 | * - rseq_mempool_set_percpu_zmalloc(). | |
8aa1462d | 235 | * |
06e0b1c0 MD |
236 | * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter |
237 | * for the returned pointer, but removes the __rseq_percpu annotation. | |
8aa1462d | 238 | * |
06e0b1c0 | 239 | * The macro rseq_percpu_ptr() takes an optional @stride argument. If |
cb475906 | 240 | * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE. |
4aa3220c | 241 | * This must match the stride used for pool creation. |
8aa1462d MD |
242 | * |
243 | * This API is MT-safe. | |
244 | */ | |
06e0b1c0 MD |
245 | #define rseq_percpu_ptr(_ptr, _cpu, _stride...) \ |
246 | ((__typeof__(*(_ptr)) *) ((uintptr_t) (_ptr) + \ | |
247 | ((unsigned int) (_cpu) * \ | |
cb475906 | 248 | (uintptr_t) RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE)))) |
ef6695f1 | 249 | |
8aa1462d | 250 | /* |
0ba2a93e | 251 | * rseq_mempool_set_create: Create a pool set. |
8aa1462d MD |
252 | * |
253 | * Create a set of pools. Its purpose is to offer a memory allocator API | |
254 | * for variable-length items (e.g. variable length strings). When | |
255 | * created, the pool set has no pool. Pools can be created and added to | |
256 | * the set. One common approach would be to create pools for each | |
257 | * relevant power of two allocation size useful for the application. | |
258 | * Only one pool can be added to the pool set for each power of two | |
259 | * allocation size. | |
260 | * | |
261 | * Returns a pool set pointer on success, else returns NULL with | |
262 | * errno=ENOMEM (out of memory). | |
263 | * | |
264 | * This API is MT-safe. | |
265 | */ | |
0ba2a93e | 266 | struct rseq_mempool_set *rseq_mempool_set_create(void); |
8aa1462d MD |
267 | |
268 | /* | |
0ba2a93e | 269 | * rseq_mempool_set_destroy: Destroy a pool set. |
8aa1462d MD |
270 | * |
271 | * Destroy a pool set and its associated resources. The pools that were | |
272 | * added to the pool set are destroyed as well. | |
273 | * | |
274 | * Returns 0 on success, -1 on failure (or partial failure), with errno | |
275 | * set by rseq_mempool_destroy(). Using a pool set after destroy |
276 | * failure is undefined. | |
277 | * | |
278 | * This API is MT-safe. | |
279 | */ | |
0ba2a93e | 280 | int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set); |
8aa1462d MD |
281 | |
282 | /* | |
0ba2a93e | 283 | * rseq_mempool_set_add_pool: Add a pool to a pool set. |
8aa1462d MD |
284 | * |
285 | * Add a @pool to the @pool_set. On success, its ownership is handed | |
286 | * over to the pool set, so the caller should not destroy it explicitly. | |
287 | * Only one pool can be added to the pool set for each power of two | |
288 | * allocation size. | |
289 | * | |
290 | * Returns 0 on success, -1 on error with the following errno: | |
291 | * - EBUSY: A pool already exists in the pool set for this power of two | |
292 | * allocation size. | |
293 | * | |
294 | * This API is MT-safe. | |
295 | */ | |
0ba2a93e MD |
296 | int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set, |
297 | struct rseq_mempool *pool); | |
ef6695f1 | 298 | |
8aa1462d | 299 | /* |
e30d5eb8 | 300 | * rseq_mempool_set_percpu_malloc: Allocate memory from a per-cpu pool set. |
8aa1462d MD |
301 | * |
302 | * Allocate an item from a per-cpu @pool. The allocation will reserve | |
303 | * an item of the size specified by @len (rounded to next power of | |
304 | * two). This effectively reserves space for this item on all CPUs. | |
305 | * | |
306 | * The space reservation will search for the smallest pool within | |
307 | * @pool_set which respects the following conditions: | |
308 | * | |
309 | * - it has an item size large enough to fit @len, | |
310 | * - it has space available. | |
311 | * | |
312 | * On success, return a "__rseq_percpu" encoded pointer to the pool | |
313 | * item. This encoded pointer is meant to be passed to rseq_percpu_ptr() | |
314 | * to be decoded to a valid address before being accessed. | |
315 | * | |
316 | * Return NULL (errno=ENOMEM) if there is not enough space left in the | |
317 | * pool to allocate an item. | |
318 | * | |
319 | * This API is MT-safe. | |
320 | */ | |
15da5c27 | 321 | void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len); |
8aa1462d MD |
322 | |
323 | /* | |
e30d5eb8 | 324 | * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
8aa1462d MD |
325 | * |
326 | * Allocate memory for an item within the pool, and zero-initialize its | |
e30d5eb8 | 327 | * memory on all CPUs. See rseq_mempool_set_percpu_malloc for details. |
8aa1462d MD |
328 | * |
329 | * This API is MT-safe. | |
330 | */ | |
15da5c27 MD |
331 | void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len); |
332 | ||
333 | /* | |
334 | * rseq_mempool_set_malloc: Allocate memory from a global pool set. | |
335 | * | |
336 | * Wrapper to allocate memory from a global pool, which can be | |
337 | * used directly without per-cpu indexing. Would normally be used | |
338 | * with pools created with max_nr_cpus=1. | |
339 | */ | |
340 | static inline | |
341 | void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len) | |
342 | { | |
343 | return (void *) rseq_mempool_set_percpu_malloc(pool_set, len); | |
344 | } | |
345 | ||
346 | /* | |
347 | * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set. | |
348 | * | |
349 | * Wrapper to allocate memory from a global pool, which can be | |
350 | * used directly without per-cpu indexing. Would normally be used | |
351 | * with pools created with max_nr_cpus=1. | |
352 | */ | |
353 | static inline | |
354 | void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len) | |
355 | { | |
356 | return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len); | |
357 | } | |
ef6695f1 | 358 | |
9bd07c29 | 359 | /* |
0ba2a93e | 360 | * rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology. |
9bd07c29 MD |
361 | * |
362 | * For pages allocated within @pool, invoke move_pages(2) with the given | |
363 | * @numa_flags to move the pages to the NUMA node associated to their | |
364 | * CPU topology. | |
365 | * | |
366 | * Argument @numa_flags are passed to move_pages(2). The expected flags are: | |
367 | * MPOL_MF_MOVE: move process-private pages to cpu-specific numa nodes. | |
368 | * MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes | |
369 | * (requires CAP_SYS_NICE). | |
370 | * | |
371 | * Returns 0 on success, else return -1 with errno set by move_pages(2). | |
372 | */ | |
0ba2a93e | 373 | int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags); |
9bd07c29 MD |
374 | |
375 | /* | |
0ba2a93e | 376 | * rseq_mempool_attr_create: Create a pool attribute structure. |
a82006d0 | 377 | */ |
0ba2a93e | 378 | struct rseq_mempool_attr *rseq_mempool_attr_create(void); |
a82006d0 MD |
379 | |
380 | /* | |
0ba2a93e | 381 | * rseq_mempool_attr_destroy: Destroy a pool attribute structure. |
a82006d0 | 382 | */ |
0ba2a93e | 383 | void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr); |
a82006d0 MD |
384 | |
385 | /* | |
0ba2a93e | 386 | * rseq_mempool_attr_set_mmap: Set pool attribute structure mmap functions. |
9bd07c29 MD |
387 | * |
388 | * The @mmap_func callback used to map the memory for the pool. | |
389 | * | |
390 | * The @munmap_func callback used to unmap the memory when the pool | |
391 | * is destroyed. | |
392 | * | |
393 | * The @mmap_priv argument is a private data pointer passed to both | |
394 | * @mmap_func and @munmap_func callbacks. | |
8118247e MD |
395 | * |
396 | * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid. | |
9bd07c29 | 397 | */ |
0ba2a93e | 398 | int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr, |
a82006d0 | 399 | void *(*mmap_func)(void *priv, size_t len), |
9bd07c29 MD |
400 | int (*munmap_func)(void *priv, void *ptr, size_t len), |
401 | void *mmap_priv); | |
402 | ||
135811f2 MD |
403 | /* |
404 | * rseq_mempool_attr_set_init: Set pool attribute structure memory init functions. | |
405 | * | |
406 | * The @init_func callback is used to initialize memory after allocation |
6e329183 MD |
407 | * for the pool. The @init_func callback must return 0 on success, -1 on |
408 | * error with errno set. If @init_func returns failure, the allocation | |
409 | * of the pool memory fails, which either causes the pool creation to | |
410 | * fail or memory allocation to fail (for extensible memory pools). | |
135811f2 MD |
411 | * |
412 | * The @init_priv argument is a private data pointer passed to the | |
413 | * @init_func callback. | |
414 | * | |
415 | * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid. | |
416 | */ | |
417 | int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr, | |
6e329183 | 418 | int (*init_func)(void *priv, void *addr, size_t len, int cpu), |
135811f2 MD |
419 | void *init_priv); |
420 | ||
d6acc8aa | 421 | /* |
0ba2a93e | 422 | * rseq_mempool_attr_set_robust: Set pool robust attribute. |
d6acc8aa MD |
423 | * |
424 | * The robust pool attribute enables runtime validation of the pool: | |
425 | * | |
426 | * - Check for double-free of pointers. | |
427 | * | |
428 | * - Detect memory leaks on pool destruction. | |
429 | * | |
430 | * - Detect free-list corruption on pool destruction. | |
431 | * | |
432 | * There is a marginal runtime overhead on malloc/free operations. | |
433 | * | |
434 | * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT | |
435 | * bytes, over the lifetime of the pool. | |
436 | * | |
437 | * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid. | |
438 | */ | |
0ba2a93e | 439 | int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr); |
d6acc8aa | 440 | |
cb475906 MD |
441 | /* |
442 | * rseq_mempool_attr_set_percpu: Set pool type as percpu. | |
443 | * | |
89b7e681 | 444 | * A pool created with this type is a per-cpu memory pool. The reserved |
cb475906 MD |
445 | * allocation size is @stride, and the maximum CPU value expected |
446 | * is (@max_nr_cpus - 1). A @stride of 0 uses the default | |
447 | * RSEQ_MEMPOOL_STRIDE. | |
448 | * | |
449 | * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid. | |
450 | */ | |
451 | int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr, | |
452 | size_t stride, int max_nr_cpus); | |
453 | ||
454 | /* | |
455 | * rseq_mempool_attr_set_global: Set pool type as global. | |
456 | * | |
89b7e681 | 457 | * A pool created with this type is a global memory pool. The reserved |
cb475906 MD |
458 | * allocation size is @stride. A @stride of 0 uses the default |
459 | * RSEQ_MEMPOOL_STRIDE. | |
460 | * | |
461 | * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid. | |
462 | */ | |
463 | int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr, size_t stride); | |
464 | ||
c6fd3981 MD |
465 | /* |
466 | * rseq_mempool_range_init_numa: NUMA initialization helper for memory range. | |
467 | * | |
468 | * Helper which can be used from mempool_attr @init_func to move a CPU | |
469 | * memory range to the NUMA node associated to its topology. | |
470 | * | |
471 | * Returns 0 on success, -1 with errno set by move_pages(2) on error. | |
472 | * Returns -1, errno=ENOSYS if NUMA support is not present. | |
473 | */ | |
474 | int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags); | |
475 | ||
6037d364 MD |
476 | /* |
477 | * rseq_mempool_get_max_nr_cpus: Get the max_nr_cpus value configured for a pool. | |
478 | * | |
479 | * Returns a value >= 0 for a per-cpu pool. | |
480 | * Returns -1, errno=EINVAL if the mempool is NULL or if the pool has a | |
481 | * global pool type. | |
482 | */ | |
483 | int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool); | |
484 | ||
c7ec94e0 MD |
485 | #ifdef __cplusplus |
486 | } | |
487 | #endif | |
488 | ||
34337fec | 489 | #endif /* _RSEQ_MEMPOOL_H */ |