// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

#include <rseq/mempool.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <rseq/compiler.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <fcntl.h>

#ifdef HAVE_LIBNUMA
# include <numa.h>
# include <numaif.h>
#endif

#include "rseq-utils.h"
#include <rseq/rseq.h>
/*
 * rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each containing objects of a
 * given size (rounded to the next power of 2), reserving a given
 * virtual address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU
 * memory allocator provides CPU-Local Storage.
 */
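/*
 * Illustrative usage sketch (not part of this file; error handling
 * omitted). It shows the intended allocation flow with the public API
 * from <rseq/mempool.h>; the struct counter type and pool name are
 * hypothetical, and the per-CPU accessors (rseq_percpu_ptr(),
 * rseq_current_cpu_raw()) come from the public rseq headers:
 *
 *	struct counter { uint64_t count; };
 *
 *	struct rseq_mempool_attr *attr = rseq_mempool_attr_create();
 *	rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
 *	struct rseq_mempool *pool = rseq_mempool_create("counters",
 *			sizeof(struct counter), attr);
 *	rseq_mempool_attr_destroy(attr);
 *
 *	struct counter __rseq_percpu *c = rseq_mempool_percpu_zmalloc(pool);
 *	struct counter *cpu_c = rseq_percpu_ptr(c, rseq_current_cpu_raw());
 *	cpu_c->count++;
 *
 *	rseq_mempool_percpu_free(c);
 *	rseq_mempool_destroy(pool);
 */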
#define POOL_SET_NR_ENTRIES	RSEQ_BITS_PER_LONG

/*
 * Smallest allocation should hold enough space for a free list pointer.
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_SET_MIN_ENTRY	3	/* Smallest item_len=8 */
#else
# define POOL_SET_MIN_ENTRY	2	/* Smallest item_len=4 */
#endif

#define BIT_PER_ULONG		(8 * sizeof(unsigned long))

#define MOVE_PAGES_BATCH_SIZE	4096

#define RANGE_HEADER_OFFSET	sizeof(struct rseq_mempool_range)

#if RSEQ_BITS_PER_LONG == 64
# define DEFAULT_POISON_VALUE	0x5555555555555555ULL
#else
# define DEFAULT_POISON_VALUE	0x55555555UL
#endif
struct free_list_node;

struct free_list_node {
	struct free_list_node *next;
};

enum mempool_type {
	MEMPOOL_TYPE_GLOBAL = 0,	/* Default */
	MEMPOOL_TYPE_PERCPU = 1,
};
struct rseq_mempool_attr {
	bool mmap_set;
	void *(*mmap_func)(void *priv, size_t len);
	int (*munmap_func)(void *priv, void *ptr, size_t len);
	void *mmap_priv;

	bool init_set;
	int (*init_func)(void *priv, void *addr, size_t len, int cpu);
	void *init_priv;

	bool robust_set;

	enum mempool_type type;
	size_t stride;
	int max_nr_cpus;

	unsigned long max_nr_ranges;

	bool poison_set;
	uintptr_t poison;

	enum rseq_mempool_populate_policy populate_policy;
};
struct rseq_mempool_range;

struct rseq_mempool_range {
	struct rseq_mempool_range *next;	/* Linked list of ranges. */
	struct rseq_mempool *pool;		/* Backward reference to container pool. */

	/*
	 * Memory layout of a mempool range:
	 * - Header page (contains struct rseq_mempool_range at the very end),
	 * - Base of the per-cpu data, starting with CPU 0.
	 *   Aliases with free-list for non-robust populate all pool.
	 * - CPU 1,
	 * ...
	 * - CPU max_nr_cpus - 1
	 * - init values (unpopulated for RSEQ_MEMPOOL_POPULATE_ALL).
	 *   Aliases with free-list for non-robust populate none pool.
	 * - free list (for robust pool).
	 *
	 * The free list aliases the CPU 0 memory area for non-robust
	 * populate all pools. It aliases with init values for
	 * non-robust populate none pools. It is located immediately
	 * after the init values for robust pools.
	 *
	 * The init values contain malloc_init/zmalloc values.
	 * Pointer is NULL for RSEQ_MEMPOOL_POPULATE_ALL.
	 */
	void *header;
	void *base;
	void *init;
	size_t next_unused;

	/* Pool range mmap/munmap */
	void *mmap_addr;
	size_t mmap_len;

	/* Track alloc/free. */
	unsigned long *alloc_bitmap;
};
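/*
 * Layout sketch for a hypothetical per-cpu range with stride S and
 * max_nr_cpus N (offsets are illustrative, derived from the layout
 * comment above and from rseq_mempool_range_create()):
 *
 *	[ header page  ] mmap_addr .. mmap_addr + page_size
 *	                 (struct rseq_mempool_range sits at its very end,
 *	                  so base == mmap_addr + page_size and
 *	                  range == base - RANGE_HEADER_OFFSET)
 *	[ CPU 0 data   ] base + 0*S .. base + 1*S
 *	[ CPU 1 data   ] base + 1*S .. base + 2*S
 *	[ ...          ]
 *	[ CPU N-1 data ] base + (N-1)*S .. base + N*S
 *	[ init values  ] base + N*S .. base + (N+1)*S   (populate-none only)
 *	[ free list    ] one extra stride               (robust pools only)
 */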
struct rseq_mempool {
	/* Head of ranges linked-list. */
	struct rseq_mempool_range *range_list;
	unsigned long nr_ranges;

	size_t item_len;
	int item_order;

	/*
	 * The free list chains freed items on the CPU 0 address range.
	 * We should rethink this decision if false sharing between
	 * malloc/free from other CPUs and data accesses from CPU 0
	 * becomes an issue. This is a NULL-terminated singly-linked
	 * list.
	 */
	struct free_list_node *free_list_head;

	/* This lock protects allocation/free within the pool. */
	pthread_mutex_t lock;

	struct rseq_mempool_attr attr;
	char *name;
};
/*
 * Pool set entries are indexed by item_len rounded to the next power of
 * 2. A pool set can contain NULL pool entries, in which case the next
 * large enough entry will be used for allocation.
 */
struct rseq_mempool_set {
	/* This lock protects add vs malloc/zmalloc within the pool set. */
	pthread_mutex_t lock;
	struct rseq_mempool *entries[POOL_SET_NR_ENTRIES];
};
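/*
 * Illustrative pool set usage sketch (hypothetical sizes, error
 * handling omitted, not part of this file): a set is populated with
 * pools of increasing item_len, and an allocation is served by the
 * smallest present pool that fits.
 *
 *	struct rseq_mempool_set *set = rseq_mempool_set_create();
 *	struct rseq_mempool_attr *attr = rseq_mempool_attr_create();
 *
 *	rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
 *	for (size_t len = 8; len <= 64; len *= 2)
 *		rseq_mempool_set_add_pool(set,
 *			rseq_mempool_create(NULL, len, attr));
 *	rseq_mempool_attr_destroy(attr);
 *
 *	// Served by the 32-byte pool (24 rounded up to the next power of 2).
 *	void __rseq_percpu *item = rseq_mempool_set_percpu_zmalloc(set, 24);
 *
 *	rseq_mempool_percpu_free(item);
 *	rseq_mempool_set_destroy(set);	// also destroys the added pools
 */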
/*
 * This memfd is used to implement the user COW behavior for the page
 * protection scheme. memfd is a sparse virtual file. Its layout (in
 * offset from beginning of file) matches the process address space
 * (pointers directly converted to file offsets).
 */
struct rseq_memfd {
	pthread_mutex_t lock;
	size_t reserved_size;
	unsigned int refcount;
	int fd;
};

static struct rseq_memfd memfd = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};
static
const char *get_pool_name(const struct rseq_mempool *pool)
{
	return pool->name ? : "<anonymous>";
}
static
void *__rseq_pool_range_percpu_ptr(const struct rseq_mempool_range *range, int cpu,
		uintptr_t item_offset, size_t stride)
{
	return range->base + (stride * cpu) + item_offset;
}
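/*
 * Illustrative example (hypothetical numbers): for a range with
 * stride = 2^20 bytes, the item at item_offset 0x80 seen by CPU 3
 * resolves to range->base + 3 * 2^20 + 0x80. The public
 * rseq_percpu_ptr() accessor performs the same cpu * stride offsetting
 * on __rseq_percpu pointers handed out by this allocator.
 */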
static
void *__rseq_pool_range_init_ptr(const struct rseq_mempool_range *range,
		uintptr_t item_offset)
{
	return range->init + item_offset;
}
static
void __rseq_percpu *__rseq_free_list_to_percpu_ptr(const struct rseq_mempool *pool,
		struct free_list_node *node)
{
	void __rseq_percpu *p = (void __rseq_percpu *) node;

	if (pool->attr.robust_set) {
		/* Skip cpus. */
		p -= pool->attr.max_nr_cpus * pool->attr.stride;
		/* Skip init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_ALL)
			p -= pool->attr.stride;
	} else {
		/* Populate none free list is in init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_ALL)
			p -= pool->attr.max_nr_cpus * pool->attr.stride;
	}
	return p;
}
static
struct free_list_node *__rseq_percpu_to_free_list_ptr(const struct rseq_mempool *pool,
		void __rseq_percpu *p)
{
	if (pool->attr.robust_set) {
		/* Skip cpus. */
		p += pool->attr.max_nr_cpus * pool->attr.stride;
		/* Skip init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_ALL)
			p += pool->attr.stride;
	} else {
		/* Populate none free list is in init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_ALL)
			p += pool->attr.max_nr_cpus * pool->attr.stride;
	}
	return (struct free_list_node *) p;
}
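/*
 * Free-list placement summary, derived from the two conversions above
 * (offsets are in units of stride, relative to the per-cpu pointer for
 * CPU 0):
 *
 *	robust, populate-all:      free list at +max_nr_cpus strides
 *	robust, populate-none:     free list at +(max_nr_cpus + 1) strides
 *	                           (one extra stride skips the init values)
 *	non-robust, populate-all:  free list aliases CPU 0 memory (+0)
 *	non-robust, populate-none: free list aliases the init values
 *	                           (+max_nr_cpus strides)
 */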
static
off_t ptr_to_off_t(void *p)
{
	return (off_t) (uintptr_t) p;
}
static
int memcmpbyte(const char *s, int c, size_t n)
{
	int res = 0;

	while (n-- > 0)
		if ((res = *(s++) - c) != 0)
			break;
	return res;
}
static
void rseq_percpu_zero_item(struct rseq_mempool *pool,
		struct rseq_mempool_range *range, uintptr_t item_offset)
{
	void *init_p;
	int i;

	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		memset(init_p, 0, pool->item_len);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);

		/*
		 * If the item is already zeroed, either because the
		 * init range update has propagated or because the
		 * content is already zeroed (e.g. zero page), don't
		 * write to the page. This eliminates useless COW over
		 * the zero page just for overwriting it with zeroes.
		 *
		 * This means zmalloc() in a populate-all policy pool
		 * does not trigger COW for CPUs which are not actively
		 * writing to the pool. This is however not the case for
		 * malloc_init() in populate-all pools if it populates
		 * non-zero content.
		 */
		if (!memcmpbyte(p, 0, pool->item_len))
			continue;
		memset(p, 0, pool->item_len);
	}
}
static
void rseq_percpu_init_item(struct rseq_mempool *pool,
		struct rseq_mempool_range *range, uintptr_t item_offset,
		void *init_ptr, size_t init_len)
{
	void *init_p;
	int i;

	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		memcpy(init_p, init_ptr, init_len);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);

		/*
		 * If the update propagated through a shared mapping,
		 * or the item already has the correct content, skip
		 * writing it into the cpu item to eliminate useless
		 * COW of the page.
		 */
		if (!memcmp(init_ptr, p, init_len))
			continue;
		memcpy(p, init_ptr, init_len);
	}
}
static
void rseq_poison_item(void *p, size_t item_len, uintptr_t poison)
{
	size_t offset;

	for (offset = 0; offset < item_len; offset += sizeof(uintptr_t))
		*((uintptr_t *) (p + offset)) = poison;
}
static
intptr_t rseq_cmp_poison_item(void *p, size_t item_len, uintptr_t poison, intptr_t *unexpected_value)
{
	size_t offset;
	intptr_t res = 0;

	for (offset = 0; offset < item_len; offset += sizeof(uintptr_t)) {
		intptr_t v = *((intptr_t *) (p + offset));

		if ((res = v - (intptr_t) poison) != 0) {
			if (unexpected_value)
				*unexpected_value = v;
			break;
		}
	}
	return res;
}
static
void rseq_percpu_poison_item(struct rseq_mempool *pool,
		struct rseq_mempool_range *range, uintptr_t item_offset)
{
	uintptr_t poison = pool->attr.poison;
	void *init_p;
	int i;

	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		rseq_poison_item(init_p, pool->item_len, poison);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);

		/*
		 * If the update propagated through a shared mapping,
		 * or the item already has the correct content, skip
		 * writing it into the cpu item to eliminate useless
		 * COW of the page.
		 *
		 * It is recommended to use zero as poison value for
		 * populate-all pools to eliminate COW due to writing
		 * poison to unused CPU memory.
		 */
		if (rseq_cmp_poison_item(p, pool->item_len, poison, NULL) == 0)
			continue;
		rseq_poison_item(p, pool->item_len, poison);
	}
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void rseq_check_poison_item(const struct rseq_mempool *pool, uintptr_t item_offset,
		void *p, size_t item_len, uintptr_t poison)
{
	intptr_t unexpected_value;

	if (rseq_cmp_poison_item(p, item_len, poison, &unexpected_value) == 0)
		return;

	fprintf(stderr, "%s: Poison corruption detected (0x%lx) for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
		__func__, (unsigned long) unexpected_value, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
	abort();
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void rseq_percpu_check_poison_item(const struct rseq_mempool *pool,
		const struct rseq_mempool_range *range, uintptr_t item_offset)
{
	uintptr_t poison = pool->attr.poison;
	void *init_p;
	int i;

	if (!pool->attr.robust_set)
		return;
	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		rseq_check_poison_item(pool, item_offset, init_p, pool->item_len, poison);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);
		rseq_check_poison_item(pool, item_offset, p, pool->item_len, poison);
	}
}
#ifdef HAVE_LIBNUMA
int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags)
{
	unsigned long nr_pages, page_len;
	int status[MOVE_PAGES_BATCH_SIZE];
	int nodes[MOVE_PAGES_BATCH_SIZE];
	void *pages[MOVE_PAGES_BATCH_SIZE];
	long ret = 0;

	if (!numa_flags) {
		errno = EINVAL;
		return -1;
	}

	page_len = rseq_get_page_len();
	nr_pages = len >> rseq_get_count_order_ulong(page_len);

	nodes[0] = numa_node_of_cpu(cpu);
	if (nodes[0] < 0)
		return -1;

	for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
		nodes[k] = nodes[0];
	}

	for (unsigned long page = 0; page < nr_pages;) {
		size_t max_k = RSEQ_ARRAY_SIZE(pages);
		size_t left = nr_pages - page;

		if (left < max_k)
			max_k = left;

		for (size_t k = 0; k < max_k; ++k, ++page) {
			pages[k] = addr + (page * page_len);
		}

		ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
		if (ret < 0)
			return ret;

		if (ret > 0) {
			fprintf(stderr, "%lu pages were not migrated\n", ret);
			for (size_t k = 0; k < max_k; ++k) {
				if (status[k] < 0)
					fprintf(stderr,
						"Error while moving page %p to numa node %d: %u\n",
						pages[k], nodes[k], -status[k]);
			}
		}
	}
	return 0;
}
#else
int rseq_mempool_range_init_numa(void *addr __attribute__((unused)),
		size_t len __attribute__((unused)),
		int cpu __attribute__((unused)),
		int numa_flags __attribute__((unused)))
{
	errno = ENOSYS;
	return -1;
}
#endif
static
void *default_mmap_func(void *priv __attribute__((unused)), size_t len)
{
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;
	return base;
}
static
int default_munmap_func(void *priv __attribute__((unused)), void *ptr, size_t len)
{
	return munmap(ptr, len);
}
static
int create_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
	size_t count;

	count = ((pool->attr.stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/*
	 * Not being able to create the validation bitmap is an error
	 * that needs to be reported.
	 */
	range->alloc_bitmap = calloc(count, sizeof(unsigned long));
	if (!range->alloc_bitmap)
		return -1;
	return 0;
}
static
bool percpu_addr_in_pool(const struct rseq_mempool *pool, void __rseq_percpu *_addr)
{
	struct rseq_mempool_range *range;
	void *addr = (void *) _addr;

	for (range = pool->range_list; range; range = range->next) {
		if (addr >= range->base && addr < range->base + range->next_unused)
			return true;
	}
	return false;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_mempool *pool)
{
	size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
		max_list_traversal = 0, traversal_iteration = 0;
	struct rseq_mempool_range *range;

	if (!pool->attr.robust_set)
		return;

	for (range = pool->range_list; range; range = range->next) {
		total_item += pool->attr.stride >> pool->item_order;
		total_never_allocated += (pool->attr.stride - range->next_unused) >> pool->item_order;
	}
	max_list_traversal = total_item - total_never_allocated;

	for (struct free_list_node *node = pool->free_list_head, *prev = NULL;
			node;
			prev = node, node = node->next, total_freed++) {
		if (traversal_iteration >= max_list_traversal) {
			fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool \"%s\" (%p), caller %p.\n",
				__func__, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		/* Node is out of range. */
		if (!percpu_addr_in_pool(pool, __rseq_free_list_to_percpu_ptr(pool, node))) {
			if (prev)
				fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
			else
				fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, node, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}
		traversal_iteration++;
	}

	if (total_never_allocated + total_freed != total_item) {
		fprintf(stderr, "%s: Corrupted free-list in pool \"%s\" (%p); total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
			__func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
		abort();
	}
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_range_poison(const struct rseq_mempool *pool,
		const struct rseq_mempool_range *range)
{
	size_t item_offset;

	for (item_offset = 0; item_offset < range->next_unused;
			item_offset += pool->item_len)
		rseq_percpu_check_poison_item(pool, range, item_offset);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_pool_poison(const struct rseq_mempool *pool)
{
	struct rseq_mempool_range *range;

	if (!pool->attr.robust_set)
		return;
	for (range = pool->range_list; range; range = range->next)
		check_range_poison(pool, range);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void destroy_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t count, total_leaks = 0;

	if (!bitmap)
		return;

	count = ((pool->attr.stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/* Assert that all items in the pool were freed. */
	for (size_t k = 0; k < count; ++k)
		total_leaks += rseq_hweight_ulong(bitmap[k]);
	if (total_leaks) {
		fprintf(stderr, "%s: Pool \"%s\" (%p) has %zu leaked items on destroy, caller: %p.\n",
			__func__, get_pool_name(pool), pool, total_leaks, (void *) __builtin_return_address(0));
		abort();
	}

	free(bitmap);
	range->alloc_bitmap = NULL;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int rseq_mempool_range_destroy(struct rseq_mempool *pool,
		struct rseq_mempool_range *range)
{
	int ret = 0;

	destroy_alloc_bitmap(pool, range);

	if (range->init) {
		/*
		 * Punch a hole into memfd where the init values used to be.
		 */
		ret = fallocate(memfd.fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			ptr_to_off_t(range->init), pool->attr.stride);
		if (ret)
			return ret;
		range->init = NULL;
	}

	/* range is a header located one page before the aligned mapping. */
	return pool->attr.munmap_func(pool->attr.mmap_priv, range->mmap_addr, range->mmap_len);
}
/*
 * Allocate a memory mapping aligned on @alignment, with an optional
 * @pre_header before the mapping.
 */
static
void *aligned_mmap_anonymous(struct rseq_mempool *pool,
		size_t page_size, size_t len, size_t alignment,
		void **pre_header, size_t pre_header_len)
{
	size_t minimum_page_count, page_count, extra, total_allocate = 0;
	int page_order;
	void *ptr;

	if (len < page_size || alignment < page_size ||
			!is_pow2(alignment) || (len & (alignment - 1))) {
		errno = EINVAL;
		return NULL;
	}
	page_order = rseq_get_count_order_ulong(page_size);
	if (page_order < 0) {
		errno = EINVAL;
		return NULL;
	}
	if (pre_header_len && (pre_header_len & (page_size - 1))) {
		errno = EINVAL;
		return NULL;
	}

	minimum_page_count = (pre_header_len + len) >> page_order;
	page_count = (pre_header_len + len + alignment - page_size) >> page_order;

	assert(page_count >= minimum_page_count);

	ptr = pool->attr.mmap_func(pool->attr.mmap_priv, page_count << page_order);
	if (!ptr)
		return NULL;

	total_allocate = page_count << page_order;

	if (!(((uintptr_t) ptr + pre_header_len) & (alignment - 1))) {
		/* Pointer is already aligned. ptr points to pre_header. */
		goto out;
	}

	/* Unmap extra before. */
	extra = offset_align((uintptr_t) ptr + pre_header_len, alignment);
	assert(!(extra & (page_size - 1)));
	if (pool->attr.munmap_func(pool->attr.mmap_priv, ptr, extra)) {
		perror("munmap");
		abort();
	}
	total_allocate -= extra;
	ptr += extra;	/* ptr points to pre_header */
	page_count -= extra >> page_order;
out:
	assert(page_count >= minimum_page_count);

	if (page_count > minimum_page_count) {
		void *extra_ptr;

		/* Unmap extra after. */
		extra_ptr = ptr + (minimum_page_count << page_order);
		extra = (page_count - minimum_page_count) << page_order;
		if (pool->attr.munmap_func(pool->attr.mmap_priv, extra_ptr, extra)) {
			perror("munmap");
			abort();
		}
		total_allocate -= extra;
	}

	assert(!(((uintptr_t) ptr + pre_header_len) & (alignment - 1)));
	assert(total_allocate == len + pre_header_len);

	if (pre_header)
		*pre_header = ptr;
	ptr += pre_header_len;
	return ptr;
}
static
int rseq_memfd_reserve_init(void *init, size_t init_len)
{
	int ret = 0;
	size_t reserve_len;

	pthread_mutex_lock(&memfd.lock);
	reserve_len = (size_t) ptr_to_off_t(init) + init_len;
	if (reserve_len > memfd.reserved_size) {
		if (ftruncate(memfd.fd, (off_t) reserve_len)) {
			ret = -1;
			goto unlock;
		}
		memfd.reserved_size = reserve_len;
	}
unlock:
	pthread_mutex_unlock(&memfd.lock);
	return ret;
}
static
struct rseq_mempool_range *rseq_mempool_range_create(struct rseq_mempool *pool)
{
	struct rseq_mempool_range *range;
	unsigned long page_size;
	void *header;
	void *base;
	size_t range_len;	/* Range len excludes header. */

	if (pool->attr.max_nr_ranges &&
			pool->nr_ranges >= pool->attr.max_nr_ranges) {
		errno = ENOMEM;
		return NULL;
	}
	page_size = rseq_get_page_len();

	range_len = pool->attr.stride * pool->attr.max_nr_cpus;
	if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_ALL)
		range_len += pool->attr.stride;	/* init values */
	if (pool->attr.robust_set)
		range_len += pool->attr.stride;	/* free list */
	base = aligned_mmap_anonymous(pool, page_size,
			range_len, pool->attr.stride,
			&header, page_size);
	if (!base)
		return NULL;
	range = (struct rseq_mempool_range *) (base - RANGE_HEADER_OFFSET);
	range->pool = pool;
	range->header = header;
	range->base = base;
	range->mmap_addr = header;
	range->mmap_len = page_size + range_len;

	if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_ALL) {
		int cpu;

		range->init = base + (pool->attr.stride * pool->attr.max_nr_cpus);
		/* Populate init values pages from memfd */
		if (rseq_memfd_reserve_init(range->init, pool->attr.stride))
			goto error_alloc;
		if (mmap(range->init, pool->attr.stride, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_FIXED, memfd.fd,
				ptr_to_off_t(range->init)) != (void *) range->init) {
			goto error_alloc;
		}
		assert(pool->attr.type == MEMPOOL_TYPE_PERCPU);
		/*
		 * Map per-cpu memory as private COW mappings of init values.
		 */
		for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
			void *p = base + (pool->attr.stride * cpu);
			size_t len = pool->attr.stride;

			if (mmap(p, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
					memfd.fd, ptr_to_off_t(range->init)) != (void *) p) {
				goto error_alloc;
			}
		}
	}
	if (pool->attr.robust_set) {
		if (create_alloc_bitmap(pool, range))
			goto error_alloc;
	}
	if (pool->attr.init_set) {
		switch (pool->attr.type) {
		case MEMPOOL_TYPE_GLOBAL:
			if (pool->attr.init_func(pool->attr.init_priv,
					base, pool->attr.stride, -1)) {
				goto error_alloc;
			}
			break;
		case MEMPOOL_TYPE_PERCPU:
		{
			int cpu;

			for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
				if (pool->attr.init_func(pool->attr.init_priv,
						base + (pool->attr.stride * cpu),
						pool->attr.stride, cpu)) {
					goto error_alloc;
				}
			}
			break;
		}
		default:
			abort();
		}
	}
	pool->nr_ranges++;
	return range;

error_alloc:
	(void) rseq_mempool_range_destroy(pool, range);
	return NULL;
}
static
int rseq_mempool_memfd_ref(struct rseq_mempool *pool)
{
	int ret = 0;

	if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_ALL)
		return 0;

	pthread_mutex_lock(&memfd.lock);
	if (memfd.refcount == 0) {
		memfd.fd = memfd_create("mempool", MFD_CLOEXEC);
		if (memfd.fd < 0) {
			perror("memfd_create");
			ret = -1;
			goto unlock;
		}
	}
	memfd.refcount++;
unlock:
	pthread_mutex_unlock(&memfd.lock);
	return ret;
}
static
void rseq_mempool_memfd_unref(struct rseq_mempool *pool)
{
	if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_ALL)
		return;

	pthread_mutex_lock(&memfd.lock);
	if (memfd.refcount == 1) {
		if (close(memfd.fd)) {
			perror("close");
			abort();
		}
		memfd.fd = -1;
		memfd.reserved_size = 0;
	}
	memfd.refcount--;
	pthread_mutex_unlock(&memfd.lock);
}
int rseq_mempool_destroy(struct rseq_mempool *pool)
{
	struct rseq_mempool_range *range, *next_range;
	int ret = 0;

	if (!pool)
		return 0;

	check_free_list(pool);
	check_pool_poison(pool);
	/* Iteration safe against removal. */
	for (range = pool->range_list; range && (next_range = range->next, 1); range = next_range) {
		if (rseq_mempool_range_destroy(pool, range)) {
			ret = -1;
			goto end;
		}
		/* Update list head to keep list coherent in case of partial failure. */
		pool->range_list = next_range;
	}
	rseq_mempool_memfd_unref(pool);
	pthread_mutex_destroy(&pool->lock);
	free(pool->name);
	free(pool);
end:
	return ret;
}
struct rseq_mempool *rseq_mempool_create(const char *pool_name,
		size_t item_len, const struct rseq_mempool_attr *_attr)
{
	struct rseq_mempool *pool;
	struct rseq_mempool_attr attr = {};
	int order;

	/* Make sure each item is large enough to contain free list pointers. */
	if (item_len < sizeof(void *))
		item_len = sizeof(void *);

	/* Align item_len on next power of two. */
	order = rseq_get_count_order_ulong(item_len);
	if (order < 0) {
		errno = EINVAL;
		return NULL;
	}
	item_len = 1UL << order;

	if (_attr)
		memcpy(&attr, _attr, sizeof(attr));

	if (!attr.mmap_set) {
		attr.mmap_func = default_mmap_func;
		attr.munmap_func = default_munmap_func;
		attr.mmap_priv = NULL;
	}

	switch (attr.type) {
	case MEMPOOL_TYPE_PERCPU:
		if (attr.max_nr_cpus < 0) {
			errno = EINVAL;
			return NULL;
		}
		if (attr.max_nr_cpus == 0) {
			/* Auto-detect the number of possible CPUs. */
			attr.max_nr_cpus = rseq_get_max_nr_cpus();
			if (attr.max_nr_cpus == 0) {
				errno = EINVAL;
				return NULL;
			}
		}
		break;
	case MEMPOOL_TYPE_GLOBAL:
		/* Override populate policy for global type. */
		attr.populate_policy = RSEQ_MEMPOOL_POPULATE_ALL;
		/* Use a 1-cpu pool for global mempool type. */
		attr.max_nr_cpus = 1;
		break;
	}

	if (!attr.stride)
		attr.stride = RSEQ_MEMPOOL_STRIDE;	/* Use default */
	if (attr.robust_set && !attr.poison_set) {
		attr.poison_set = true;
		attr.poison = DEFAULT_POISON_VALUE;
	}
	if (item_len > attr.stride || attr.stride < (size_t) rseq_get_page_len() ||
			!is_pow2(attr.stride)) {
		errno = EINVAL;
		return NULL;
	}

	pool = calloc(1, sizeof(struct rseq_mempool));
	if (!pool)
		return NULL;

	memcpy(&pool->attr, &attr, sizeof(attr));
	pthread_mutex_init(&pool->lock, NULL);
	pool->item_len = item_len;
	pool->item_order = order;

	if (rseq_mempool_memfd_ref(pool))
		goto error_pool;

	pool->range_list = rseq_mempool_range_create(pool);
	if (!pool->range_list)
		goto error_pool;

	if (pool_name) {
		pool->name = strdup(pool_name);
		if (!pool->name)
			goto error_pool;
	}
	return pool;

error_pool:
	rseq_mempool_destroy(pool);
	return NULL;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void set_alloc_slot(struct rseq_mempool *pool, struct rseq_mempool_range *range, size_t item_offset)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Print error if bit is already set. */
	if (bitmap[k] & mask) {
		fprintf(stderr, "%s: Allocator corruption detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] |= mask;
}
static
void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool,
		bool zeroed, void *init_ptr, size_t init_len)
{
	struct rseq_mempool_range *range;
	struct free_list_node *node;
	uintptr_t item_offset;
	void __rseq_percpu *addr;

	if (init_len > pool->item_len) {
		errno = EINVAL;
		return NULL;
	}
	pthread_mutex_lock(&pool->lock);
	/* Get first entry from free list. */
	node = pool->free_list_head;
	if (node != NULL) {
		void *range_base, *ptr;

		ptr = __rseq_free_list_to_percpu_ptr(pool, node);
		range_base = (void *) ((uintptr_t) ptr & (~(pool->attr.stride - 1)));
		range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
		/* Remove node from free list (update head). */
		pool->free_list_head = node->next;
		item_offset = (uintptr_t) (ptr - range_base);
		rseq_percpu_check_poison_item(pool, range, item_offset);
		addr = __rseq_free_list_to_percpu_ptr(pool, node);
		goto end;
	}
	/*
	 * If the most recent range (first in list) does not have any
	 * room left, create a new range and prepend it to the list
	 * head.
	 */
	range = pool->range_list;
	if (range->next_unused + pool->item_len > pool->attr.stride) {
		range = rseq_mempool_range_create(pool);
		if (!range) {
			errno = ENOMEM;
			addr = NULL;
			goto unlock;
		}
		/* Add range to head of list. */
		range->next = pool->range_list;
		pool->range_list = range;
	}
	/* First range in list has room left. */
	item_offset = range->next_unused;
	addr = (void __rseq_percpu *) (range->base + item_offset);
	range->next_unused += pool->item_len;
end:
	set_alloc_slot(pool, range, item_offset);
unlock:
	pthread_mutex_unlock(&pool->lock);
	if (addr) {
		if (zeroed)
			rseq_percpu_zero_item(pool, range, item_offset);
		else if (init_ptr) {
			rseq_percpu_init_item(pool, range, item_offset,
					init_ptr, init_len);
		}
	}
	return addr;
}
void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool)
{
	return __rseq_percpu_malloc(pool, false, NULL, 0);
}

void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool)
{
	return __rseq_percpu_malloc(pool, true, NULL, 0);
}

void __rseq_percpu *rseq_mempool_percpu_malloc_init(struct rseq_mempool *pool,
		void *init_ptr, size_t len)
{
	return __rseq_percpu_malloc(pool, false, init_ptr, len);
}
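/*
 * Illustrative malloc_init sketch (hypothetical type and values, not
 * part of this file): every per-CPU copy of the allocated item starts
 * out with the content of the supplied template.
 *
 *	struct counter tmpl = { .count = 42 };
 *	struct counter __rseq_percpu *c =
 *		rseq_mempool_percpu_malloc_init(pool, &tmpl, sizeof(tmpl));
 *
 * For populate-none pools the template is written into the init values
 * area and reaches the per-CPU copies through the shared memfd
 * mapping; per-CPU pages are only COWed once a CPU actually writes.
 */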
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void clear_alloc_slot(struct rseq_mempool *pool, struct rseq_mempool_range *range, size_t item_offset)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Print error if bit is not set. */
	if (!(bitmap[k] & mask)) {
		fprintf(stderr, "%s: Double-free detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset,
			(void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] &= ~mask;
}
void librseq_mempool_percpu_free(void __rseq_percpu *_ptr, size_t stride)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	void *range_base = (void *) (ptr & (~(stride - 1)));
	struct rseq_mempool_range *range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
	struct rseq_mempool *pool = range->pool;
	uintptr_t item_offset = ptr & (stride - 1);
	struct free_list_node *head, *item;

	pthread_mutex_lock(&pool->lock);
	clear_alloc_slot(pool, range, item_offset);
	/* Add ptr to head of free list */
	head = pool->free_list_head;
	if (pool->attr.poison_set)
		rseq_percpu_poison_item(pool, range, item_offset);
	item = __rseq_percpu_to_free_list_ptr(pool, _ptr);
	/*
	 * Setting the next pointer will overwrite the first uintptr_t
	 * poison for either CPU 0 (populate all) or init data (populate
	 * none).
	 */
	item->next = head;
	pool->free_list_head = item;
	pthread_mutex_unlock(&pool->lock);
}
struct rseq_mempool_set *rseq_mempool_set_create(void)
{
	struct rseq_mempool_set *pool_set;

	pool_set = calloc(1, sizeof(struct rseq_mempool_set));
	if (!pool_set)
		return NULL;
	pthread_mutex_init(&pool_set->lock, NULL);
	return pool_set;
}
int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set)
{
	int order, ret;

	for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
		struct rseq_mempool *pool = pool_set->entries[order];

		if (!pool)
			continue;
		ret = rseq_mempool_destroy(pool);
		if (ret)
			return ret;
		pool_set->entries[order] = NULL;
	}
	pthread_mutex_destroy(&pool_set->lock);
	free(pool_set);
	return 0;
}
/* Ownership of pool is handed over to pool set on success. */
int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set, struct rseq_mempool *pool)
{
	size_t item_order = pool->item_order;
	int ret = 0;

	pthread_mutex_lock(&pool_set->lock);
	if (pool_set->entries[item_order]) {
		errno = EBUSY;
		ret = -1;
		goto end;
	}
	pool_set->entries[pool->item_order] = pool;
end:
	pthread_mutex_unlock(&pool_set->lock);
	return ret;
}
static
void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set,
		void *init_ptr, size_t len, bool zeroed)
{
	int order, min_order = POOL_SET_MIN_ENTRY;
	struct rseq_mempool *pool;
	void __rseq_percpu *addr;

	order = rseq_get_count_order_ulong(len);
	if (order > POOL_SET_MIN_ENTRY)
		min_order = order;
again:
	pthread_mutex_lock(&pool_set->lock);
	/* First smallest present pool where @len fits. */
	for (order = min_order; order < POOL_SET_NR_ENTRIES; order++) {
		pool = pool_set->entries[order];

		if (!pool)
			continue;
		if (pool->item_len >= len)
			goto found;
	}
	pool = NULL;
found:
	pthread_mutex_unlock(&pool_set->lock);
	if (pool) {
		addr = __rseq_percpu_malloc(pool, zeroed, init_ptr, len);
		if (addr == NULL && errno == ENOMEM) {
			/*
			 * If the allocation failed, try again with a
			 * larger pool.
			 */
			min_order = order + 1;
			goto again;
		}
	} else {
		errno = ENOMEM;
		addr = NULL;
	}
	return addr;
}
void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, NULL, len, false);
}

void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, NULL, len, true);
}

void __rseq_percpu *rseq_mempool_set_percpu_malloc_init(struct rseq_mempool_set *pool_set,
		void *init_ptr, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, init_ptr, len, false);
}
struct rseq_mempool_attr *rseq_mempool_attr_create(void)
{
	return calloc(1, sizeof(struct rseq_mempool_attr));
}

void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr)
{
	free(attr);
}
int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
		void *(*mmap_func)(void *priv, size_t len),
		int (*munmap_func)(void *priv, void *ptr, size_t len),
		void *mmap_priv)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->mmap_set = true;
	attr->mmap_func = mmap_func;
	attr->munmap_func = munmap_func;
	attr->mmap_priv = mmap_priv;
	return 0;
}
int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
		int (*init_func)(void *priv, void *addr, size_t len, int cpu),
		void *init_priv)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->init_set = true;
	attr->init_func = init_func;
	attr->init_priv = init_priv;
	return 0;
}
int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->robust_set = true;
	return 0;
}
int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
		size_t stride, int max_nr_cpus)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->type = MEMPOOL_TYPE_PERCPU;
	attr->stride = stride;
	attr->max_nr_cpus = max_nr_cpus;
	return 0;
}
int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr,
		size_t stride)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->type = MEMPOOL_TYPE_GLOBAL;
	attr->stride = stride;
	attr->max_nr_cpus = 0;
	return 0;
}
int rseq_mempool_attr_set_max_nr_ranges(struct rseq_mempool_attr *attr,
		unsigned long max_nr_ranges)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->max_nr_ranges = max_nr_ranges;
	return 0;
}
int rseq_mempool_attr_set_poison(struct rseq_mempool_attr *attr,
		uintptr_t poison)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->poison_set = true;
	attr->poison = poison;
	return 0;
}
int rseq_mempool_attr_set_populate_policy(struct rseq_mempool_attr *attr,
		enum rseq_mempool_populate_policy policy)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->populate_policy = policy;
	return 0;
}
int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool)
{
	if (!mempool || mempool->attr.type != MEMPOOL_TYPE_PERCPU) {
		errno = EINVAL;
		return -1;
	}
	return mempool->attr.max_nr_cpus;
}
;