// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

#include <rseq/mempool.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <rseq/compiler.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#include "rseq-utils.h"
/*
 * rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each containing objects of a
 * given size (rounded up to the next power of 2), reserving a given
 * virtual address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: whereas TLS provides Thread-Local Storage, the
 * per-CPU memory allocator provides CPU-Local Storage.
 */
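/*
 * Example usage (illustrative sketch only, not part of this file; it
 * assumes the rseq_percpu_ptr() and rseq_percpu_free() wrappers
 * declared in rseq/mempool.h, and an application-provided max_nr_cpus
 * value):
 *
 *	struct counter { long count; };
 *	struct rseq_percpu_pool *pool;
 *	struct counter __rseq_percpu *c;
 *
 *	pool = rseq_percpu_pool_create("counters", sizeof(struct counter),
 *			0, max_nr_cpus, NULL);	// stride 0: use the default stride
 *	c = (struct counter __rseq_percpu *) rseq_percpu_zmalloc(pool);
 *	rseq_percpu_ptr(c, cpu)->count++;	// access the CPU-local instance for @cpu
 *	rseq_percpu_free(c);
 *	rseq_percpu_pool_destroy(pool);
 */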
/*
 * Use the high bits of per-CPU addresses to index the pool.
 * This leaves the low bits available to the application for pointer
 * tagging (based on the next power of 2 alignment of the allocations).
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_INDEX_BITS	16
#else
# define POOL_INDEX_BITS	8
#endif
#define MAX_NR_POOLS		(1UL << POOL_INDEX_BITS)
#define POOL_INDEX_SHIFT	(RSEQ_BITS_PER_LONG - POOL_INDEX_BITS)
#define MAX_POOL_LEN		(1UL << POOL_INDEX_SHIFT)
#define MAX_POOL_LEN_MASK	(MAX_POOL_LEN - 1)

#define POOL_SET_NR_ENTRIES	POOL_INDEX_SHIFT
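/*
 * Illustration (not part of the allocator): on a 64-bit build, a
 * __rseq_percpu pointer returned by the allocator decomposes into a
 * 16-bit pool index in the high bits and an item offset in the low
 * bits, e.g.:
 *
 *	uintptr_t ptr = (uintptr_t) item;
 *	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;		// which pool
 *	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;	// offset within a CPU's range
 *
 * The low bits left free by the item alignment remain available to the
 * application for pointer tagging.
 */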
/*
 * Smallest allocation should hold enough space for a free list pointer.
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_SET_MIN_ENTRY	3	/* Smallest item_len=8 */
#else
# define POOL_SET_MIN_ENTRY	2	/* Smallest item_len=4 */
#endif
/*
 * Skip pool index 0 to ensure allocated entries at index 0 do not match
 * a NULL pointer.
 */
#define FIRST_POOL		1

#define BIT_PER_ULONG		(8 * sizeof(unsigned long))

#define MOVE_PAGES_BATCH_SIZE	4096
struct free_list_node;

struct free_list_node {
	struct free_list_node *next;
};
/* This lock protects pool create/destroy. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
struct rseq_pool_attr {
	bool mmap_set;
	void *(*mmap_func)(void *priv, size_t len);
	int (*munmap_func)(void *priv, void *ptr, size_t len);
	void *mmap_priv;

	bool robust_set;
};
struct rseq_percpu_pool_range;

struct rseq_percpu_pool_range {
	struct rseq_percpu_pool_range *next;
	struct rseq_percpu_pool *pool;	/* Backward ref. to container pool. */
	void *base;
	size_t next_unused;
	/* Track alloc/free. */
	unsigned long *alloc_bitmap;
};
struct rseq_percpu_pool {
	/* Linked-list of ranges. */
	struct rseq_percpu_pool_range *ranges;

	unsigned int index;		/* Index in the pools array. */
	size_t item_len;
	size_t percpu_stride;
	int item_order;
	int max_nr_cpus;

	/*
	 * The free list chains freed items on the CPU 0 address range.
	 * We should rethink this decision if false sharing between
	 * malloc/free from other CPUs and data accesses from CPU 0
	 * becomes an issue. This is a NULL-terminated singly-linked
	 * list.
	 */
	struct free_list_node *free_list_head;

	/* This lock protects allocation/free within the pool. */
	pthread_mutex_t lock;

	struct rseq_pool_attr attr;
	char *name;
};
//TODO: the array of pools should grow dynamically on create.
static struct rseq_percpu_pool rseq_percpu_pool[MAX_NR_POOLS];
/*
 * Pool set entries are indexed by item_len rounded to the next power of
 * 2. A pool set can contain NULL pool entries, in which case the next
 * large enough entry will be used for allocation. (See the example
 * sketch following this structure.)
 */
struct rseq_percpu_pool_set {
	/* This lock protects add vs malloc/zmalloc within the pool set. */
	pthread_mutex_t lock;
	struct rseq_percpu_pool *entries[POOL_SET_NR_ENTRIES];
};
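/*
 * Illustration (not part of the allocator): a request for a 24-byte
 * object is rounded up to the next power of 2 (32 bytes), which maps to
 * entries[5] since 1UL << 5 == 32. If entries[5] is NULL, the next
 * populated entry with item_len >= 24 (e.g. entries[6] holding a
 * 64-byte pool) is used instead.
 */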
static void *__rseq_pool_percpu_ptr(struct rseq_percpu_pool *pool, int cpu,
		uintptr_t item_offset, size_t stride)
{
	/* TODO: Implement multi-ranges support. */
	return pool->ranges->base + (stride * cpu) + item_offset;
}
void *__rseq_percpu_ptr(void __rseq_percpu *_ptr, int cpu, size_t stride)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
	struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];

	return __rseq_pool_percpu_ptr(pool, cpu, item_offset, stride);
}
static void rseq_percpu_zero_item(struct rseq_percpu_pool *pool, uintptr_t item_offset)
{
	int i;

	for (i = 0; i < pool->max_nr_cpus; i++) {
		char *p = __rseq_pool_percpu_ptr(pool, i,
				item_offset, pool->percpu_stride);
		memset(p, 0, pool->item_len);
	}
}
//TODO: this will need to be reimplemented for ranges,
//which cannot use __rseq_pool_percpu_ptr.
#if 0 //#ifdef HAVE_LIBNUMA
static int rseq_percpu_pool_range_init_numa(struct rseq_percpu_pool *pool,
		struct rseq_percpu_pool_range *range, int numa_flags)
{
	unsigned long nr_pages, page_len;
	int cpu;
	long ret;

	page_len = rseq_get_page_len();
	nr_pages = pool->percpu_stride >> rseq_get_count_order_ulong(page_len);
	for (cpu = 0; cpu < pool->max_nr_cpus; cpu++) {
		int status[MOVE_PAGES_BATCH_SIZE];
		int nodes[MOVE_PAGES_BATCH_SIZE];
		void *pages[MOVE_PAGES_BATCH_SIZE];

		nodes[0] = numa_node_of_cpu(cpu);
		for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
			nodes[k] = nodes[0];
		}

		for (unsigned long page = 0; page < nr_pages;) {
			size_t max_k = RSEQ_ARRAY_SIZE(pages);
			size_t left = nr_pages - page;

			if (left < max_k)
				max_k = left;
			for (size_t k = 0; k < max_k; ++k, ++page) {
				pages[k] = __rseq_pool_percpu_ptr(pool, cpu, page * page_len);
			}

			ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				fprintf(stderr, "%lu pages were not migrated\n", ret);
				for (size_t k = 0; k < max_k; ++k) {
					if (status[k] < 0)
						fprintf(stderr,
							"Error while moving page %p to numa node %d: %u\n",
							pages[k], nodes[k], -status[k]);
				}
			}
		}
	}
	return 0;
}

int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags)
{
	struct rseq_percpu_pool_range *range;
	int ret;

	for (range = pool->ranges; range; range = range->next) {
		ret = rseq_percpu_pool_range_init_numa(pool, range, numa_flags);
		if (ret)
			return ret;
	}
	return 0;
}
#else
int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool __attribute__((unused)),
		int numa_flags __attribute__((unused)))
{
	return 0;
}
#endif
static void *default_mmap_func(void *priv __attribute__((unused)), size_t len)
{
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;
	return base;
}
static int default_munmap_func(void *priv __attribute__((unused)), void *ptr, size_t len)
{
	return munmap(ptr, len);
}
static int create_alloc_bitmap(struct rseq_percpu_pool *pool,
		struct rseq_percpu_pool_range *range)
{
	size_t count;

	count = ((pool->percpu_stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/*
	 * Not being able to create the validation bitmap is an error
	 * that needs to be reported.
	 */
	range->alloc_bitmap = calloc(count, sizeof(unsigned long));
	if (!range->alloc_bitmap)
		return -1;
	return 0;
}
static const char *get_pool_name(const struct rseq_percpu_pool *pool)
{
	return pool->name ? : "<anonymous>";
}
static bool addr_in_pool(const struct rseq_percpu_pool *pool, void *addr)
{
	struct rseq_percpu_pool_range *range;

	for (range = pool->ranges; range; range = range->next) {
		if (addr >= range->base && addr < range->base + range->next_unused)
			return true;
	}
	return false;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_percpu_pool *pool)
{
	size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
		max_list_traversal = 0, traversal_iteration = 0;
	struct rseq_percpu_pool_range *range;

	if (!pool->attr.robust_set)
		return;

	for (range = pool->ranges; range; range = range->next) {
		total_item += pool->percpu_stride >> pool->item_order;
		total_never_allocated += (pool->percpu_stride - range->next_unused) >> pool->item_order;
	}
	max_list_traversal = total_item - total_never_allocated;

	for (struct free_list_node *node = pool->free_list_head, *prev = NULL;
			node;
			prev = node, node = node->next) {
		void *node_addr = node;

		if (traversal_iteration >= max_list_traversal) {
			fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool \"%s\" (%p), caller %p.\n",
				__func__, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		/* Node is out of range. */
		if (!addr_in_pool(pool, node_addr)) {
			if (prev)
				fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
			else
				fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, node, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		traversal_iteration++;
		total_freed++;
	}

	if (total_never_allocated + total_freed != total_item) {
		fprintf(stderr, "%s: Corrupted free-list in pool \"%s\" (%p); total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
			__func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
		abort();
	}
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void destroy_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t count, total_leaks = 0;

	if (!bitmap)
		return;

	count = ((pool->percpu_stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/* Assert that all items in the pool were freed. */
	for (size_t k = 0; k < count; ++k)
		total_leaks += rseq_hweight_ulong(bitmap[k]);
	if (total_leaks) {
		fprintf(stderr, "%s: Pool \"%s\" (%p) has %zu leaked items on destroy, caller: %p.\n",
			__func__, get_pool_name(pool), pool, total_leaks, (void *) __builtin_return_address(0));
		abort();
	}

	free(bitmap);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int rseq_percpu_pool_range_destroy(struct rseq_percpu_pool *pool,
		struct rseq_percpu_pool_range *range)
{
	destroy_alloc_bitmap(pool, range);
	/* range is a header located one page before the aligned mapping. */
	return pool->attr.munmap_func(pool->attr.mmap_priv, range,
			(pool->percpu_stride * pool->max_nr_cpus) + rseq_get_page_len());
}
/*
 * Allocate a memory mapping aligned on @alignment, with an optional
 * @pre_header before the mapping.
 */
static void *aligned_mmap_anonymous(struct rseq_percpu_pool *pool,
		size_t page_size, size_t len, size_t alignment,
		void **pre_header, size_t pre_header_len)
{
	size_t minimum_page_count, page_count, extra, total_allocate = 0;
	int page_order;
	void *ptr;

	if (len < page_size || alignment < page_size ||
			!is_pow2(len) || !is_pow2(alignment)) {
		errno = EINVAL;
		return NULL;
	}
	page_order = rseq_get_count_order_ulong(page_size);
	if (page_order < 0) {
		errno = EINVAL;
		return NULL;
	}
	if (pre_header_len && (pre_header_len & (page_size - 1))) {
		errno = EINVAL;
		return NULL;
	}

	minimum_page_count = (pre_header_len + len) >> page_order;
	page_count = (pre_header_len + len + alignment - page_size) >> page_order;

	assert(page_count >= minimum_page_count);

	ptr = pool->attr.mmap_func(pool->attr.mmap_priv, page_count << page_order);
	if (!ptr)
		return NULL;

	total_allocate = page_count << page_order;

	if (!(((uintptr_t) ptr + pre_header_len) & (alignment - 1))) {
		/* Pointer is already aligned. ptr points to pre_header. */
		goto aligned;
	}

	/* Unmap extra before. */
	extra = offset_align((uintptr_t) ptr + pre_header_len, alignment);
	assert(!(extra & (page_size - 1)));
	if (pool->attr.munmap_func(pool->attr.mmap_priv, ptr, extra)) {
		perror("munmap");
		abort();
	}
	total_allocate -= extra;
	ptr += extra;	/* ptr points to pre_header */
	page_count -= extra >> page_order;
aligned:
	assert(page_count >= minimum_page_count);

	if (page_count > minimum_page_count) {
		void *extra_ptr;

		/* Unmap extra after. */
		extra_ptr = ptr + (minimum_page_count << page_order);
		extra = (page_count - minimum_page_count) << page_order;
		if (pool->attr.munmap_func(pool->attr.mmap_priv, extra_ptr, extra)) {
			perror("munmap");
			abort();
		}
		total_allocate -= extra;
	}

	assert(!(((uintptr_t) ptr + pre_header_len) & (alignment - 1)));
	assert(total_allocate == len + pre_header_len);

	if (pre_header)
		*pre_header = ptr;
	ptr += pre_header_len;
	return ptr;
}
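/*
 * Illustration (not part of the allocator): with a 4 KiB page size, a
 * one page pre_header, len = 64 KiB and alignment = 64 KiB, the function
 * over-maps 4 KiB + 64 KiB + 64 KiB - 4 KiB = 128 KiB, then unmaps the
 * unaligned head and the unused tail, leaving exactly one page of
 * header followed by a 64 KiB mapping aligned on a 64 KiB boundary.
 */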
static struct rseq_percpu_pool_range *rseq_percpu_pool_range_create(struct rseq_percpu_pool *pool)
{
	struct rseq_percpu_pool_range *range;
	unsigned long page_size;
	void *base;

	page_size = rseq_get_page_len();

	base = aligned_mmap_anonymous(pool, page_size,
			pool->percpu_stride * pool->max_nr_cpus,
			pool->percpu_stride,
			(void **) &range, page_size);
	if (!base)
		return NULL;
	range->pool = pool;
	range->base = base;
	if (pool->attr.robust_set) {
		if (create_alloc_bitmap(pool, range))
			goto error_alloc;
	}
	return range;

error_alloc:
	(void) rseq_percpu_pool_range_destroy(pool, range);
	return NULL;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int __rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
{
	struct rseq_percpu_pool_range *range, *next_range;

	check_free_list(pool);
	/* Iteration safe against removal. */
	for (range = pool->ranges; range && (next_range = range->next, 1); range = next_range) {
		if (rseq_percpu_pool_range_destroy(pool, range))
			return -1;
		/* Update list head to keep list coherent in case of partial failure. */
		pool->ranges = next_range;
	}
	pthread_mutex_destroy(&pool->lock);
	free(pool->name);
	memset(pool, 0, sizeof(*pool));
	return 0;
}
int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
{
	int ret;

	pthread_mutex_lock(&pool_lock);
	ret = __rseq_percpu_pool_destroy(pool);
	pthread_mutex_unlock(&pool_lock);

	return ret;
}
struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
		size_t item_len, size_t percpu_stride, int max_nr_cpus,
		const struct rseq_pool_attr *_attr)
{
	struct rseq_percpu_pool *pool;
	struct rseq_pool_attr attr = {};
	unsigned int i;
	int order;

	/* Make sure each item is large enough to contain free list pointers. */
	if (item_len < sizeof(void *))
		item_len = sizeof(void *);

	/* Align item_len on next power of two. */
	order = rseq_get_count_order_ulong(item_len);
	if (order < 0) {
		errno = EINVAL;
		return NULL;
	}
	item_len = 1UL << order;

	if (!percpu_stride)
		percpu_stride = RSEQ_PERCPU_STRIDE;	/* Use default */

	if (max_nr_cpus < 0 || item_len > percpu_stride ||
			percpu_stride > (UINTPTR_MAX >> POOL_INDEX_BITS) ||
			percpu_stride < (size_t) rseq_get_page_len() ||
			!is_pow2(percpu_stride)) {
		errno = EINVAL;
		return NULL;
	}

	if (_attr)
		memcpy(&attr, _attr, sizeof(attr));
	if (!attr.mmap_set) {
		attr.mmap_func = default_mmap_func;
		attr.munmap_func = default_munmap_func;
		attr.mmap_priv = NULL;
	}

	pthread_mutex_lock(&pool_lock);
	/* Linear scan in array of pools to find empty spot. */
	for (i = FIRST_POOL; i < MAX_NR_POOLS; i++) {
		pool = &rseq_percpu_pool[i];
		if (!pool->ranges)
			goto found_empty;
	}
	errno = ENOMEM;
	goto error_unlock;

found_empty:
	memcpy(&pool->attr, &attr, sizeof(attr));
	pthread_mutex_init(&pool->lock, NULL);
	pool->percpu_stride = percpu_stride;
	pool->max_nr_cpus = max_nr_cpus;
	pool->index = i;
	pool->item_len = item_len;
	pool->item_order = order;

	//TODO: implement multi-range support.
	pool->ranges = rseq_percpu_pool_range_create(pool);
	if (!pool->ranges)
		goto error_pool;

	if (pool_name) {
		pool->name = strdup(pool_name);
		if (!pool->name)
			goto error_pool;
	}

	pthread_mutex_unlock(&pool_lock);
	return pool;

error_pool:
	__rseq_percpu_pool_destroy(pool);
error_unlock:
	pthread_mutex_unlock(&pool_lock);
	return NULL;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void set_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
{
	unsigned long *bitmap = pool->ranges->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Print error if bit is already set. */
	if (bitmap[k] & mask) {
		fprintf(stderr, "%s: Allocator corruption detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] |= mask;
}
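/*
 * Illustration (not part of the allocator): with item_order 5
 * (32-byte items), item offset 0x940 gives item_index = 0x940 >> 5 = 74,
 * hence bit 10 (74 % 64) of bitmap word 1 (74 / 64) on a 64-bit build.
 */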
static void __rseq_percpu *__rseq_percpu_malloc(struct rseq_percpu_pool *pool, bool zeroed)
{
	struct free_list_node *node;
	uintptr_t item_offset;
	void __rseq_percpu *addr;

	pthread_mutex_lock(&pool->lock);
	/* Get first entry from free list. */
	node = pool->free_list_head;
	if (node) {
		/* Remove node from free list (update head). */
		pool->free_list_head = node->next;
		item_offset = (uintptr_t) ((void *) node - pool->ranges->base);
		addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
		goto end;
	}
	if (pool->ranges->next_unused + pool->item_len > pool->percpu_stride) {
		errno = ENOMEM;
		addr = NULL;
		goto end;
	}
	item_offset = pool->ranges->next_unused;
	addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
	pool->ranges->next_unused += pool->item_len;
end:
	if (addr)
		set_alloc_slot(pool, item_offset);
	pthread_mutex_unlock(&pool->lock);
	if (zeroed && addr)
		rseq_percpu_zero_item(pool, item_offset);
	return addr;
}
void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool)
{
	return __rseq_percpu_malloc(pool, false);
}

void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool)
{
	return __rseq_percpu_malloc(pool, true);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void clear_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
{
	unsigned long *bitmap = pool->ranges->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Print error if bit is not set. */
	if (!(bitmap[k] & mask)) {
		fprintf(stderr, "%s: Double-free detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset,
			(void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] &= ~mask;
}
void __rseq_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
	struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];
	struct free_list_node *head, *item;

	pthread_mutex_lock(&pool->lock);
	clear_alloc_slot(pool, item_offset);
	/* Add ptr to head of free list. */
	head = pool->free_list_head;
	/* Free-list is in CPU 0 range. */
	item = (struct free_list_node *) __rseq_pool_percpu_ptr(pool, 0, item_offset, percpu_stride);
	item->next = head;
	pool->free_list_head = item;
	pthread_mutex_unlock(&pool->lock);
}
struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void)
{
	struct rseq_percpu_pool_set *pool_set;

	pool_set = calloc(1, sizeof(struct rseq_percpu_pool_set));
	if (!pool_set)
		return NULL;
	pthread_mutex_init(&pool_set->lock, NULL);
	return pool_set;
}
int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set)
{
	int order, ret;

	for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
		struct rseq_percpu_pool *pool = pool_set->entries[order];

		if (!pool)
			continue;
		ret = rseq_percpu_pool_destroy(pool);
		if (ret)
			return ret;
		pool_set->entries[order] = NULL;
	}
	pthread_mutex_destroy(&pool_set->lock);
	free(pool_set);
	return 0;
}
/* Ownership of pool is handed over to pool set on success. */
int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set, struct rseq_percpu_pool *pool)
{
	size_t item_order = pool->item_order;
	int ret = 0;

	pthread_mutex_lock(&pool_set->lock);
	if (pool_set->entries[item_order]) {
		errno = EBUSY;
		ret = -1;
		goto end;
	}
	pool_set->entries[pool->item_order] = pool;
end:
	pthread_mutex_unlock(&pool_set->lock);
	return ret;
}
static void __rseq_percpu *__rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len, bool zeroed)
{
	int order, min_order = POOL_SET_MIN_ENTRY;
	struct rseq_percpu_pool *pool;
	void __rseq_percpu *addr;

	order = rseq_get_count_order_ulong(len);
	if (order > POOL_SET_MIN_ENTRY)
		min_order = order;
again:
	pthread_mutex_lock(&pool_set->lock);
	/* First smallest present pool where @len fits. */
	for (order = min_order; order < POOL_SET_NR_ENTRIES; order++) {
		pool = pool_set->entries[order];

		if (!pool)
			continue;
		if (pool->item_len >= len)
			goto found;
	}
	pool = NULL;
found:
	pthread_mutex_unlock(&pool_set->lock);
	if (pool) {
		addr = __rseq_percpu_malloc(pool, zeroed);
		if (addr == NULL && errno == ENOMEM) {
			/*
			 * If the allocation failed, try again with a
			 * larger pool.
			 */
			min_order = order + 1;
			goto again;
		}
	} else {
		/* Not found. */
		errno = ENOMEM;
		addr = NULL;
	}
	return addr;
}
void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len)
{
	return __rseq_percpu_pool_set_malloc(pool_set, len, false);
}

void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len)
{
	return __rseq_percpu_pool_set_malloc(pool_set, len, true);
}
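/*
 * Example usage of pool sets (illustrative sketch only; assumes an
 * application-provided max_nr_cpus value and skips error handling):
 *
 *	struct rseq_percpu_pool_set *set = rseq_percpu_pool_set_create();
 *
 *	// Populate the set with pools of increasing item sizes.
 *	for (size_t len = 8; len <= 128; len <<= 1) {
 *		struct rseq_percpu_pool *p = rseq_percpu_pool_create(NULL, len,
 *				0, max_nr_cpus, NULL);
 *		rseq_percpu_pool_set_add_pool(set, p);
 *	}
 *	// Served from the 32-byte pool (next power of 2 >= 24).
 *	void __rseq_percpu *obj = rseq_percpu_pool_set_zmalloc(set, 24);
 *
 *	rseq_percpu_pool_set_destroy(set);
 */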
struct rseq_pool_attr *rseq_pool_attr_create(void)
{
	return calloc(1, sizeof(struct rseq_pool_attr));
}
void rseq_pool_attr_destroy(struct rseq_pool_attr *attr)
{
	free(attr);
}
int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
		void *(*mmap_func)(void *priv, size_t len),
		int (*munmap_func)(void *priv, void *ptr, size_t len),
		void *mmap_priv)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->mmap_set = true;
	attr->mmap_func = mmap_func;
	attr->munmap_func = munmap_func;
	attr->mmap_priv = mmap_priv;
	return 0;
}
int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->robust_set = true;
	return 0;
}