/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

#ifndef _RSEQ_PERCPU_ALLOC_H
#define _RSEQ_PERCPU_ALLOC_H

#include <stddef.h>
#include <sys/types.h>

/*
 * rseq/percpu-alloc.h: rseq CPU-Local Storage (CLS) memory allocator.
 *
 * The rseq per-CPU memory allocator allows the application the request
 * memory pools of CPU-Local memory each of containing objects of a
 * given size (rounded to next power of 2), reserving a given virtual
 * address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU
 * memory allocator provides CPU-Local Storage.
 */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Tag pointers returned by:
 * - rseq_percpu_malloc(),
 * - rseq_percpu_zmalloc(),
 * - rseq_percpu_pool_set_malloc(),
 * - rseq_percpu_pool_set_zmalloc().
 *
 * and passed as parameter to:
 * - rseq_percpu_ptr(),
 * - rseq_percpu_free().
 *
 * with __rseq_percpu for use by static analyzers.
 */
#define __rseq_percpu

struct rseq_mmap_attr;
struct rseq_percpu_pool;

/*
 * Create a robust pool. This enables the following runtime checks:
 *
 * - Check for double free of pointers.
 *
 * - Check that all items were freed when destroying the pool, i.e. no memory
 *   leak.
 *
 * There is a marginal runtime overhead on malloc/free operations.
 *
 * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT
 * bytes, over the lifetime of the pool.
 */
#define RSEQ_POOL_ROBUST	(1 << 0)

/*
 * rseq_percpu_pool_create: Create a per-cpu memory pool.
 *
 * Create a per-cpu memory pool for items of size @item_len (rounded to
 * next power of two). The reserved allocation size is @percpu_len, and
 * the maximum CPU value expected is (@max_nr_cpus - 1).
 *
 * The @mmap_attr pointer used to specify the memory allocator callbacks
 * to use to manage the memory for the pool. If NULL, use a default
 * internal implementation. The @mmap_attr can be destroyed immediately
 * after rseq_percpu_pool_create() returns. The caller keeps ownership
 * of the @mmap_attr.
 *
 * Argument @flags is a bitwise-or'd selector of:
 * - RSEQ_POOL_ROBUST.
 *
 * Returns a pointer to the created percpu pool. Return NULL on error,
 * with errno set accordingly:
 *   EINVAL: Invalid argument.
 *   ENOMEM: Not enough resources (memory or pool indexes) available to
 *           allocate pool.
 *
 * In addition, if the mmap_attr mmap callback fails, NULL is returned
 * and errno is propagated from the callback. The default callback can
 * return errno=ENOMEM.
 *
 * This API is MT-safe.
 */
struct rseq_percpu_pool *rseq_percpu_pool_create(size_t item_len,
		size_t percpu_len, int max_nr_cpus,
		const struct rseq_mmap_attr *mmap_attr,
		int flags);

/*
 * rseq_percpu_pool_destroy: Destroy a per-cpu memory pool.
 *
 * Destroy a per-cpu memory pool, unmapping its memory and removing the
 * pool entry from the global index. No pointers allocated from the
 * pool should be used when it is destroyed. This includes rseq_percpu_ptr().
 *
 * Argument @pool is a pointer to the per-cpu pool to destroy.
 *
 * Return values: 0 on success, -1 on error, with errno set accordingly:
 *   ENOENT: Trying to free a pool which was not allocated.
 *
 * If the munmap_func callback fails, -1 is returned and errno is
 * propagated from the callback. The default callback can return
 * errno=EINVAL.
 *
 * This API is MT-safe.
 */
int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool);

/*
 * rseq_percpu_malloc: Allocate memory from a per-cpu pool.
 *
 * Allocate an item from a per-cpu @pool. The allocation will reserve
 * an item of the size specified by @item_len (rounded to next power of
 * two) at pool creation. This effectively reserves space for this item
 * on all CPUs.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool);

/*
 * rseq_percpu_zmalloc: Allocated zero-initialized memory from a per-cpu pool.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool);

/*
 * rseq_percpu_free: Free memory from a per-cpu pool.
 *
 * Free an item pointed to by @ptr from its per-cpu pool.
 *
 * The @ptr argument is a __rseq_percpu encoded pointer returned by
 * either:
 *
 * - rseq_percpu_malloc(),
 * - rseq_percpu_zmalloc(),
 * - rseq_percpu_pool_set_malloc(),
 * - rseq_percpu_pool_set_zmalloc().
 *
 * This API is MT-safe.
 */
void rseq_percpu_free(void __rseq_percpu *ptr);

/*
 * rseq_percpu_ptr: Decode a per-cpu pointer.
 *
 * Decode a per-cpu pointer @ptr to get the associated pointer for the
 * given @cpu. The @ptr argument is a __rseq_percpu encoded pointer
 * returned by either:
 *
 * - rseq_percpu_malloc(),
 * - rseq_percpu_zmalloc(),
 * - rseq_percpu_pool_set_malloc(),
 * - rseq_percpu_pool_set_zmalloc().
 *
 * The __rseq_percpu pointer can be decoded with rseq_percpu_ptr() even
 * after it has been freed, as long as its associated pool has not been
 * destroyed. However, memory pointed to by the decoded pointer should
 * not be accessed after the __rseq_percpu pointer has been freed.
 *
 * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
 * for the returned pointer, but removes the __rseq_percpu annotation.
 *
 * This API is MT-safe.
 */
void *__rseq_percpu_ptr(void __rseq_percpu *ptr, int cpu);
#define rseq_percpu_ptr(ptr, cpu)	((__typeof__(*(ptr)) *) __rseq_percpu_ptr(ptr, cpu))

/*
 * rseq_percpu_pool_set_create: Create a pool set.
 *
 * Create a set of pools. Its purpose is to offer a memory allocator API
 * for variable-length items (e.g. variable length strings). When
 * created, the pool set has no pool. Pools can be created and added to
 * the set. One common approach would be to create pools for each
 * relevant power of two allocation size useful for the application.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns a pool set pointer on success, else returns NULL with
 * errno=ENOMEM (out of memory).
 *
 * This API is MT-safe.
 */
struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void);

/*
 * rseq_percpu_pool_set_destroy: Destroy a pool set.
 *
 * Destroy a pool set and its associated resources. The pools that were
 * added to the pool set are destroyed as well.
 *
 * Returns 0 on success, -1 on failure (or partial failure), with errno
 * set by rseq_percpu_pool_destroy(). Using a pool set after destroy
 * failure is undefined.
 *
 * This API is MT-safe.
 */
int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set);

/*
 * rseq_percpu_pool_set_add_pool: Add a pool to a pool set.
 *
 * Add a @pool to the @pool_set. On success, its ownership is handed
 * over to the pool set, so the caller should not destroy it explicitly.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns 0 on success, -1 on error with the following errno:
 * - EBUSY: A pool already exists in the pool set for this power of two
 *   allocation size.
 *
 * This API is MT-safe.
 */
int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set,
		struct rseq_percpu_pool *pool);

/*
 * rseq_percpu_pool_set_malloc: Allocate memory from a per-cpu pool set.
 *
 * Allocate an item from a per-cpu @pool. The allocation will reserve
 * an item of the size specified by @len (rounded to next power of
 * two). This effectively reserves space for this item on all CPUs.
 *
 * The space reservation will search for the smallest pool within
 * @pool_set which respects the following conditions:
 *
 * - it has an item size large enough to fit @len,
 * - it has space available.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len);

/*
 * rseq_percpu_pool_set_zmalloc: Allocated zero-initialized memory from a per-cpu pool set.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_percpu_pool_set_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len);

/*
 * rseq_percpu_pool_init_numa: Move pages to the NUMA node associated to their CPU topology.
 *
 * For pages allocated within @pool, invoke move_pages(2) with the given
 * @numa_flags to move the pages to the NUMA node associated to their
 * CPU topology.
 *
 * Argument @numa_flags are passed to move_pages(2). The expected flags are:
 *   MPOL_MF_MOVE:     move process-private pages to cpu-specific numa nodes.
 *   MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes
 *                     (requires CAP_SYS_NICE).
 *
 * Returns 0 on success, else return -1 with errno set by move_pages(2).
 */
int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags);

/*
 * rseq_mmap_attr_create: Create a mmap attribute structure.
 *
 * The @mmap_func callback used to map the memory for the pool.
 *
 * The @munmap_func callback used to unmap the memory when the pool
 * is destroyed.
 *
 * The @mmap_priv argument is a private data pointer passed to both
 * @mmap_func and @munmap_func callbacks.
 */
struct rseq_mmap_attr *rseq_mmap_attr_create(void *(*mmap_func)(void *priv, size_t len),
		int (*munmap_func)(void *priv, void *ptr, size_t len),
		void *mmap_priv);

/*
 * rseq_mmap_attr_destroy: Destroy a mmap attribute structure.
 */
void rseq_mmap_attr_destroy(struct rseq_mmap_attr *attr);

#ifdef __cplusplus
}
#endif

#endif /* _RSEQ_PERCPU_ALLOC_H */