// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

#include <rseq/mempool.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <rseq/compiler.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <fcntl.h>

#ifdef HAVE_LIBNUMA
# include <numa.h>
# include <numaif.h>
#endif

#include "rseq-utils.h"
#include <rseq/rseq.h>

/*
 * rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each pool containing objects of a
 * given size (rounded to the next power of 2), reserving a given
 * virtual address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU
 * memory allocator provides CPU-Local Storage.
 */
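
/*
 * Minimal usage sketch (illustrative only, not compiled as part of
 * this file; error handling elided). rseq_percpu_ptr() and
 * rseq_current_cpu_raw() come from the public rseq headers:
 *
 *	struct counter { intptr_t count; };
 *
 *	struct rseq_mempool *pool;
 *	struct counter __rseq_percpu *c;
 *
 *	pool = rseq_mempool_create("counters", sizeof(struct counter), NULL);
 *	c = (struct counter __rseq_percpu *) rseq_mempool_percpu_zmalloc(pool);
 *	rseq_percpu_ptr(c, rseq_current_cpu_raw())->count++;
 *	rseq_mempool_percpu_free(c);
 *	rseq_mempool_destroy(pool);
 */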

#define POOL_SET_NR_ENTRIES	RSEQ_BITS_PER_LONG

/*
 * Smallest allocation should hold enough space for a free list pointer.
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_SET_MIN_ENTRY	3	/* Smallest item_len=8 */
#else
# define POOL_SET_MIN_ENTRY	2	/* Smallest item_len=4 */
#endif

#define BIT_PER_ULONG		(8 * sizeof(unsigned long))

#define MOVE_PAGES_BATCH_SIZE	4096

#define RANGE_HEADER_OFFSET	sizeof(struct rseq_mempool_range)

#if RSEQ_BITS_PER_LONG == 64
# define DEFAULT_PRIVATE_POISON_VALUE	0x5555555555555555ULL
#else
# define DEFAULT_PRIVATE_POISON_VALUE	0x55555555UL
#endif

struct free_list_node;

struct free_list_node {
	struct free_list_node *next;
};

enum mempool_type {
	MEMPOOL_TYPE_GLOBAL = 0,	/* Default */
	MEMPOOL_TYPE_PERCPU = 1,
};

struct rseq_mempool_attr {
	bool init_set;
	int (*init_func)(void *priv, void *addr, size_t len, int cpu);
	void *init_priv;

	bool robust_set;

	enum mempool_type type;
	size_t stride;
	int max_nr_cpus;

	unsigned long max_nr_ranges;

	bool poison_set;
	uintptr_t poison;

	enum rseq_mempool_populate_policy populate_policy;
};

struct rseq_mempool_range;

struct rseq_mempool_range {
	struct rseq_mempool_range *next;	/* Linked list of ranges. */
	struct rseq_mempool *pool;		/* Backward reference to container pool. */

	/*
	 * Memory layout of a mempool range:
	 * - Header page (contains struct rseq_mempool_range at the very end),
	 * - Base of the per-cpu data, starting with CPU 0.
	 *   Aliases with free-list for non-robust populate all pool.
	 * - CPU 1,
	 * ...
	 * - CPU max_nr_cpus - 1,
	 * - init values (unpopulated for RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL).
	 *   Aliases with free-list for non-robust populate none pool.
	 * - free list (for robust pool).
	 *
	 * The free list aliases the CPU 0 memory area for non-robust
	 * populate all pools. It aliases with init values for
	 * non-robust populate none pools. It is located immediately
	 * after the init values for robust pools.
	 */
	void *header;
	void *base;
	/*
	 * The init values contain malloc_init/zmalloc values.
	 * Pointer is NULL for RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL.
	 */
	void *init;
	size_t next_unused;

	/* Pool range mmap/munmap */
	void *mmap_addr;
	size_t mmap_len;

	/* Track alloc/free. */
	unsigned long *alloc_bitmap;
};
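
/*
 * Worked example of the range layout described above (the numbers are
 * illustrative assumptions, not defaults): for a robust populate-none
 * pool with stride = 1 MB and max_nr_cpus = 4, a range maps, from
 * base:
 *
 *	base + 0 MB	CPU 0 data
 *	base + 1 MB	CPU 1 data
 *	base + 2 MB	CPU 2 data
 *	base + 3 MB	CPU 3 data
 *	base + 4 MB	init values
 *	base + 5 MB	free list
 *
 * and the struct rseq_mempool_range header sits at the end of the
 * header page, at base - RANGE_HEADER_OFFSET.
 */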

struct rseq_mempool {
	/* Head of ranges linked-list. */
	struct rseq_mempool_range *range_list;
	unsigned long nr_ranges;

	size_t item_len;
	int item_order;

	/*
	 * The free list chains freed items on the CPU 0 address range.
	 * We should rethink this decision if false sharing between
	 * malloc/free from other CPUs and data accesses from CPU 0
	 * becomes an issue. This is a NULL-terminated singly-linked
	 * list.
	 */
	struct free_list_node *free_list_head;

	/* This lock protects allocation/free within the pool. */
	pthread_mutex_t lock;

	struct rseq_mempool_attr attr;
	char *name;
};

/*
 * Pool set entries are indexed by item_len rounded to the next power of
 * 2. A pool set can contain NULL pool entries, in which case the next
 * large enough entry will be used for allocation.
 */
struct rseq_mempool_set {
	/* This lock protects add vs malloc/zmalloc within the pool set. */
	pthread_mutex_t lock;
	struct rseq_mempool *entries[POOL_SET_NR_ENTRIES];
};

/*
 * This memfd is used to implement the user COW behavior for the page
 * protection scheme. memfd is a sparse virtual file. Its layout (in
 * offset from beginning of file) matches the process address space
 * (pointers directly converted to file offsets).
 */
struct rseq_memfd {
	pthread_mutex_t lock;
	size_t reserved_size;
	unsigned int refcount;
	int fd;
};

static struct rseq_memfd memfd = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.fd = -1,
};

static
const char *get_pool_name(const struct rseq_mempool *pool)
{
	return pool->name ? : "<anonymous>";
}

static
void *__rseq_pool_range_percpu_ptr(const struct rseq_mempool_range *range, int cpu,
		uintptr_t item_offset, size_t stride)
{
	return range->base + (stride * cpu) + item_offset;
}

static
void *__rseq_pool_range_init_ptr(const struct rseq_mempool_range *range,
		uintptr_t item_offset)
{
	/* The init values area does not exist for populate-all pools. */
	if (!range->init)
		return NULL;
	return range->init + item_offset;
}

static
void __rseq_percpu *__rseq_free_list_to_percpu_ptr(const struct rseq_mempool *pool,
		struct free_list_node *node)
{
	void __rseq_percpu *p = (void __rseq_percpu *) node;

	if (pool->attr.robust_set) {
		/* Skip cpus. */
		p -= pool->attr.max_nr_cpus * pool->attr.stride;
		/* Skip init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
			p -= pool->attr.stride;
	} else {
		/* Populate none free list is in init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
			p -= pool->attr.max_nr_cpus * pool->attr.stride;
	}
	return p;
}

static
struct free_list_node *__rseq_percpu_to_free_list_ptr(const struct rseq_mempool *pool,
		void __rseq_percpu *p)
{
	if (pool->attr.robust_set) {
		/* Skip cpus. */
		p += pool->attr.max_nr_cpus * pool->attr.stride;
		/* Skip init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
			p += pool->attr.stride;
	} else {
		/* Populate none free list is in init values */
		if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
			p += pool->attr.max_nr_cpus * pool->attr.stride;
	}
	return (struct free_list_node *) p;
}
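
/*
 * Example of the two conversions above, under the same illustrative
 * assumptions (robust populate-none pool, stride = 1 MB,
 * max_nr_cpus = 4): an item at CPU 0 offset 0x80 is chained, as a
 * free-list node, in the free-list area at base + 5 MB + 0x80.
 * __rseq_percpu_to_free_list_ptr() therefore adds 4 MB (skip cpus)
 * plus 1 MB (skip init values), and __rseq_free_list_to_percpu_ptr()
 * subtracts the same amounts to recover the CPU 0 address handed out
 * to the application.
 */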

static
off_t ptr_to_off_t(void *p)
{
	return (off_t) (uintptr_t) p;
}

static
intptr_t rseq_cmp_item(void *p, size_t item_len, intptr_t cmp_value, intptr_t *unexpected_value)
{
	size_t offset;
	intptr_t res = 0;

	for (offset = 0; offset < item_len; offset += sizeof(uintptr_t)) {
		intptr_t v = *((intptr_t *) (p + offset));

		if ((res = v - cmp_value) != 0) {
			if (unexpected_value)
				*unexpected_value = v;
			break;
		}
	}
	return res;
}

static
void rseq_percpu_zero_item(struct rseq_mempool *pool,
		struct rseq_mempool_range *range, uintptr_t item_offset)
{
	void *init_p;
	int i;

	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		bzero(init_p, pool->item_len);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);

		/*
		 * If item is already zeroed, either because the
		 * init range update has propagated or because the
		 * content is already zeroed (e.g. zero page), don't
		 * write to the page. This eliminates useless COW over
		 * the zero page just for overwriting it with zeroes.
		 *
		 * This means zmalloc() in a populate all policy pool
		 * does not trigger COW for CPUs which are not actively
		 * writing to the pool. This is however not the case for
		 * malloc_init() in populate-all pools if it populates
		 * non-zero content.
		 */
		if (!rseq_cmp_item(p, pool->item_len, 0, NULL))
			continue;
		bzero(p, pool->item_len);
	}
}

static
void rseq_percpu_init_item(struct rseq_mempool *pool,
		struct rseq_mempool_range *range, uintptr_t item_offset,
		void *init_ptr, size_t init_len)
{
	void *init_p;
	int i;

	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		memcpy(init_p, init_ptr, init_len);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);

		/*
		 * If the update propagated through a shared mapping,
		 * or the item already has the correct content, skip
		 * writing it into the cpu item to eliminate useless
		 * COW of the page.
		 */
		if (!memcmp(init_ptr, p, init_len))
			continue;
		memcpy(p, init_ptr, init_len);
	}
}

static
void rseq_poison_item(void *p, size_t item_len, uintptr_t poison)
{
	size_t offset;

	for (offset = 0; offset < item_len; offset += sizeof(uintptr_t))
		*((uintptr_t *) (p + offset)) = poison;
}

static
void rseq_percpu_poison_item(struct rseq_mempool *pool,
		struct rseq_mempool_range *range, uintptr_t item_offset)
{
	uintptr_t poison = pool->attr.poison;
	void *init_p;
	int i;

	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		rseq_poison_item(init_p, pool->item_len, poison);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);

		/*
		 * If the update propagated through a shared mapping,
		 * or the item already has the correct content, skip
		 * writing it into the cpu item to eliminate useless
		 * COW of the page.
		 *
		 * It is recommended to use zero as poison value for
		 * populate-all pools to eliminate COW due to writing
		 * poison to unused CPU memory.
		 */
		if (rseq_cmp_item(p, pool->item_len, poison, NULL) == 0)
			continue;
		rseq_poison_item(p, pool->item_len, poison);
	}
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void rseq_check_poison_item(const struct rseq_mempool *pool, uintptr_t item_offset,
		void *p, size_t item_len, uintptr_t poison)
{
	intptr_t unexpected_value;

	if (rseq_cmp_item(p, item_len, poison, &unexpected_value) == 0)
		return;

	fprintf(stderr, "%s: Poison corruption detected (0x%lx) for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
		__func__, (unsigned long) unexpected_value, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
	abort();
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void rseq_percpu_check_poison_item(const struct rseq_mempool *pool,
		const struct rseq_mempool_range *range, uintptr_t item_offset)
{
	uintptr_t poison = pool->attr.poison;
	void *init_p;
	int i;

	if (!pool->attr.robust_set)
		return;
	init_p = __rseq_pool_range_init_ptr(range, item_offset);
	if (init_p)
		rseq_check_poison_item(pool, item_offset, init_p, pool->item_len, poison);
	for (i = 0; i < pool->attr.max_nr_cpus; i++) {
		char *p = __rseq_pool_range_percpu_ptr(range, i,
				item_offset, pool->attr.stride);
		rseq_check_poison_item(pool, item_offset, p, pool->item_len, poison);
	}
}

#ifdef HAVE_LIBNUMA
int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags)
{
	unsigned long nr_pages, page_len;
	int status[MOVE_PAGES_BATCH_SIZE];
	int nodes[MOVE_PAGES_BATCH_SIZE];
	void *pages[MOVE_PAGES_BATCH_SIZE];
	long ret;

	if (!numa_flags) {
		errno = EINVAL;
		return -1;
	}
	page_len = rseq_get_page_len();
	nr_pages = len >> rseq_get_count_order_ulong(page_len);

	nodes[0] = numa_node_of_cpu(cpu);
	if (nodes[0] < 0)
		return -1;

	for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
		nodes[k] = nodes[0];
	}

	for (unsigned long page = 0; page < nr_pages;) {

		size_t max_k = RSEQ_ARRAY_SIZE(pages);
		size_t left = nr_pages - page;

		if (left < max_k) {
			max_k = left;
		}

		for (size_t k = 0; k < max_k; ++k, ++page) {
			pages[k] = addr + (page * page_len);
			status[k] = -EPERM;
		}

		ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
		if (ret < 0)
			return ret;

		if (ret > 0) {
			fprintf(stderr, "%lu pages were not migrated\n", ret);
			for (size_t k = 0; k < max_k; ++k) {
				if (status[k] < 0)
					fprintf(stderr,
						"Error while moving page %p to numa node %d: %u\n",
						pages[k], nodes[k], -status[k]);
			}
		}
	}
	return 0;
}
#else
int rseq_mempool_range_init_numa(void *addr __attribute__((unused)),
		size_t len __attribute__((unused)),
		int cpu __attribute__((unused)),
		int numa_flags __attribute__((unused)))
{
	errno = ENOSYS;
	return -1;
}
#endif
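
/*
 * Sketch of wiring NUMA placement into a pool through the init
 * callback (illustrative assumption: the adapter below and the
 * MPOL_MF_MOVE flag, which move_pages(2) accepts, are one possible
 * way to use rseq_mempool_range_init_numa(); neither is defined by
 * this file):
 *
 *	static int init_numa(void *priv, void *addr, size_t len, int cpu)
 *	{
 *		return rseq_mempool_range_init_numa(addr, len, cpu,
 *				*(int *) priv);
 *	}
 *
 *	static int numa_flags = MPOL_MF_MOVE;
 *	...
 *	rseq_mempool_attr_set_init(attr, init_numa, &numa_flags);
 */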

static
int create_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
	size_t count;

	count = ((pool->attr.stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/*
	 * Not being able to create the validation bitmap is an error
	 * that needs to be reported.
	 */
	range->alloc_bitmap = calloc(count, sizeof(unsigned long));
	if (!range->alloc_bitmap)
		return -1;
	return 0;
}

static
bool percpu_addr_in_pool(const struct rseq_mempool *pool, void __rseq_percpu *_addr)
{
	struct rseq_mempool_range *range;
	void *addr = (void *) _addr;

	for (range = pool->range_list; range; range = range->next) {
		if (addr >= range->base && addr < range->base + range->next_unused)
			return true;
	}
	return false;
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_mempool *pool)
{
	size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
		max_list_traversal = 0, traversal_iteration = 0;
	struct rseq_mempool_range *range;

	if (!pool->attr.robust_set)
		return;

	for (range = pool->range_list; range; range = range->next) {
		total_item += pool->attr.stride >> pool->item_order;
		total_never_allocated += (pool->attr.stride - range->next_unused) >> pool->item_order;
	}
	max_list_traversal = total_item - total_never_allocated;

	for (struct free_list_node *node = pool->free_list_head, *prev = NULL;
	     node;
	     prev = node, node = node->next) {

		/* Detect infinite loops in the free list. */
		if (traversal_iteration >= max_list_traversal) {
			fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool \"%s\" (%p), caller %p.\n",
				__func__, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		/* Node is out of range. */
		if (!percpu_addr_in_pool(pool, __rseq_free_list_to_percpu_ptr(pool, node))) {
			if (prev)
				fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
			else
				fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, node, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		traversal_iteration++;
		total_freed++;
	}

	if (total_never_allocated + total_freed != total_item) {
		fprintf(stderr, "%s: Corrupted free-list in pool \"%s\" (%p); total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
			__func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
		abort();
	}
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_range_poison(const struct rseq_mempool *pool,
		const struct rseq_mempool_range *range)
{
	size_t item_offset;

	for (item_offset = 0; item_offset < range->next_unused;
			item_offset += pool->item_len)
		rseq_percpu_check_poison_item(pool, range, item_offset);
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_pool_poison(const struct rseq_mempool *pool)
{
	struct rseq_mempool_range *range;

	if (!pool->attr.robust_set)
		return;

	for (range = pool->range_list; range; range = range->next)
		check_range_poison(pool, range);
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void destroy_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t count, total_leaks = 0;

	if (!bitmap)
		return;

	count = ((pool->attr.stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/* Assert that all items in the pool were freed. */
	for (size_t k = 0; k < count; ++k)
		total_leaks += rseq_hweight_ulong(bitmap[k]);
	if (total_leaks) {
		fprintf(stderr, "%s: Pool \"%s\" (%p) has %zu leaked items on destroy, caller: %p.\n",
			__func__, get_pool_name(pool), pool, total_leaks, (void *) __builtin_return_address(0));
		abort();
	}

	free(bitmap);
	range->alloc_bitmap = NULL;
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int rseq_mempool_range_destroy(struct rseq_mempool *pool,
		struct rseq_mempool_range *range)
{
	destroy_alloc_bitmap(pool, range);

	if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL) {
		int ret;

		/*
		 * Punch a hole into memfd where the init values used to be.
		 */
		ret = fallocate(memfd.fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			ptr_to_off_t(range->init), pool->attr.stride);
		if (ret)
			return ret;
	}

	/* range is a header located one page before the aligned mapping. */
	return munmap(range->mmap_addr, range->mmap_len);
}

/*
 * Allocate a memory mapping aligned on @alignment, with an optional
 * @pre_header before the mapping.
 */
static
void *aligned_mmap_anonymous(size_t page_size, size_t len, size_t alignment,
		void **pre_header, size_t pre_header_len)
{
	size_t minimum_page_count, page_count, extra, total_allocate = 0;
	int page_order;
	void *ptr;

	if (len < page_size || alignment < page_size ||
			!is_pow2(alignment) || (len & (alignment - 1))) {
		errno = EINVAL;
		ptr = NULL;
		goto alloc_error;
	}
	page_order = rseq_get_count_order_ulong(page_size);
	if (page_order < 0) {
		errno = EINVAL;
		ptr = NULL;
		goto alloc_error;
	}
	if (pre_header_len && (pre_header_len & (page_size - 1))) {
		errno = EINVAL;
		ptr = NULL;
		goto alloc_error;
	}

	minimum_page_count = (pre_header_len + len) >> page_order;
	page_count = (pre_header_len + len + alignment - page_size) >> page_order;

	assert(page_count >= minimum_page_count);

	ptr = mmap(NULL, page_count << page_order, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (ptr == MAP_FAILED) {
		ptr = NULL;
		goto alloc_error;
	}

	total_allocate = page_count << page_order;

	if (!(((uintptr_t) ptr + pre_header_len) & (alignment - 1))) {
		/* Pointer is already aligned. ptr points to pre_header. */
		goto out;
	}

	/* Unmap extra before. */
	extra = offset_align((uintptr_t) ptr + pre_header_len, alignment);
	assert(!(extra & (page_size - 1)));
	if (munmap(ptr, extra)) {
		perror("munmap");
		abort();
	}
	total_allocate -= extra;
	ptr += extra;	/* ptr points to pre_header */
	page_count -= extra >> page_order;
out:
	assert(page_count >= minimum_page_count);

	if (page_count > minimum_page_count) {
		void *extra_ptr;

		/* Unmap extra after. */
		extra_ptr = ptr + (minimum_page_count << page_order);
		extra = (page_count - minimum_page_count) << page_order;
		if (munmap(extra_ptr, extra)) {
			perror("munmap");
			abort();
		}
		total_allocate -= extra;
	}

	assert(!(((uintptr_t) ptr + pre_header_len) & (alignment - 1)));
	assert(total_allocate == len + pre_header_len);

alloc_error:
	if (ptr) {
		if (pre_header)
			*pre_header = ptr;
		ptr += pre_header_len;
	}
	return ptr;
}
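
/*
 * Numeric walk-through of aligned_mmap_anonymous() (illustrative):
 * with page_size = 4 kB, len = 4 MB, alignment = 4 MB and
 * pre_header_len = 4 kB, the function over-allocates
 * 4 kB + 4 MB + (4 MB - 4 kB) = 8 MB, unmaps the unaligned head so
 * that ptr + 4 kB lands on a 4 MB boundary, then unmaps the unused
 * tail, leaving exactly one header page followed by the 4 MB aligned
 * range mapped.
 */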

static
int rseq_memfd_reserve_init(void *init, size_t init_len)
{
	int ret = 0;
	size_t reserve_len;

	pthread_mutex_lock(&memfd.lock);
	reserve_len = (size_t) ptr_to_off_t(init) + init_len;
	if (reserve_len > memfd.reserved_size) {
		if (ftruncate(memfd.fd, (off_t) reserve_len)) {
			ret = -1;
			goto unlock;
		}
		memfd.reserved_size = reserve_len;
	}
unlock:
	pthread_mutex_unlock(&memfd.lock);
	return ret;
}

static
struct rseq_mempool_range *rseq_mempool_range_create(struct rseq_mempool *pool)
{
	struct rseq_mempool_range *range;
	unsigned long page_size;
	void *header;
	void *base;
	int cpu;
	size_t range_len;	/* Range len excludes header. */

	if (pool->attr.max_nr_ranges &&
			pool->nr_ranges >= pool->attr.max_nr_ranges) {
		errno = ENOMEM;
		return NULL;
	}
	page_size = rseq_get_page_len();

	range_len = pool->attr.stride * pool->attr.max_nr_cpus;
	if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
		range_len += pool->attr.stride;	/* init values */
	if (pool->attr.robust_set)
		range_len += pool->attr.stride;	/* free list */
	base = aligned_mmap_anonymous(page_size, range_len,
			pool->attr.stride, &header, page_size);
	if (!base)
		return NULL;
	range = (struct rseq_mempool_range *) (base - RANGE_HEADER_OFFSET);
	range->pool = pool;
	range->header = header;
	range->base = base;
	range->mmap_addr = header;
	range->mmap_len = page_size + range_len;

	if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL) {
		range->init = base + (pool->attr.stride * pool->attr.max_nr_cpus);
		/* Populate init values pages from memfd */
		if (rseq_memfd_reserve_init(range->init, pool->attr.stride))
			goto error_alloc;
		if (mmap(range->init, pool->attr.stride, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_FIXED, memfd.fd,
				ptr_to_off_t(range->init)) != (void *) range->init) {
			goto error_alloc;
		}
		assert(pool->attr.type == MEMPOOL_TYPE_PERCPU);
		/*
		 * Map per-cpu memory as private COW mappings of init values.
		 */
		for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
			void *p = base + (pool->attr.stride * cpu);
			size_t len = pool->attr.stride;

			if (mmap(p, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
					memfd.fd, ptr_to_off_t(range->init)) != (void *) p) {
				goto error_alloc;
			}
		}
	}

	if (pool->attr.robust_set) {
		if (create_alloc_bitmap(pool, range))
			goto error_alloc;
	}
	if (pool->attr.init_set) {
		switch (pool->attr.type) {
		case MEMPOOL_TYPE_GLOBAL:
			if (pool->attr.init_func(pool->attr.init_priv,
					base, pool->attr.stride, -1)) {
				goto error_alloc;
			}
			break;
		case MEMPOOL_TYPE_PERCPU:
			for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
				if (pool->attr.init_func(pool->attr.init_priv,
						base + (pool->attr.stride * cpu),
						pool->attr.stride, cpu)) {
					goto error_alloc;
				}
			}
			break;
		default:
			abort();
		}
	}
	pool->nr_ranges++;
	return range;

error_alloc:
	(void) rseq_mempool_range_destroy(pool, range);
	return NULL;
}

static
int rseq_mempool_memfd_ref(struct rseq_mempool *pool)
{
	int ret = 0;

	if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
		return 0;

	pthread_mutex_lock(&memfd.lock);
	if (memfd.refcount == 0) {
		memfd.fd = memfd_create("mempool", MFD_CLOEXEC);
		if (memfd.fd < 0) {
			perror("memfd_create");
			ret = -1;
			goto unlock;
		}
	}
	memfd.refcount++;
unlock:
	pthread_mutex_unlock(&memfd.lock);
	return ret;
}

static
void rseq_mempool_memfd_unref(struct rseq_mempool *pool)
{
	if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
		return;

	pthread_mutex_lock(&memfd.lock);
	assert(memfd.refcount > 0);
	if (memfd.refcount == 1) {
		if (close(memfd.fd)) {
			perror("close");
			abort();
		}
		memfd.fd = -1;
		memfd.reserved_size = 0;
	}
	memfd.refcount--;
	pthread_mutex_unlock(&memfd.lock);
}

int rseq_mempool_destroy(struct rseq_mempool *pool)
{
	struct rseq_mempool_range *range, *next_range;
	int ret = 0;

	if (!pool)
		return 0;

	check_free_list(pool);
	check_pool_poison(pool);

	/* Iteration safe against removal. */
	for (range = pool->range_list; range && (next_range = range->next, 1); range = next_range) {
		if (rseq_mempool_range_destroy(pool, range)) {
			ret = -1;
			goto end;
		}
		/* Update list head to keep list coherent in case of partial failure. */
		pool->range_list = next_range;
	}
	rseq_mempool_memfd_unref(pool);
	pthread_mutex_destroy(&pool->lock);
	free(pool->name);
	free(pool);
end:
	return ret;
}

struct rseq_mempool *rseq_mempool_create(const char *pool_name,
		size_t item_len, const struct rseq_mempool_attr *_attr)
{
	struct rseq_mempool *pool;
	struct rseq_mempool_attr attr = {};
	int order;

	/* Make sure each item is large enough to contain free list pointers. */
	if (item_len < sizeof(void *))
		item_len = sizeof(void *);

	/* Align item_len on next power of two. */
	order = rseq_get_count_order_ulong(item_len);
	if (order < 0) {
		errno = EINVAL;
		return NULL;
	}
	item_len = 1UL << order;

	if (_attr)
		memcpy(&attr, _attr, sizeof(attr));

	switch (attr.type) {
	case MEMPOOL_TYPE_PERCPU:
		if (attr.max_nr_cpus < 0) {
			errno = EINVAL;
			return NULL;
		}
		if (attr.max_nr_cpus == 0) {
			/* Auto-detect */
			attr.max_nr_cpus = rseq_get_max_nr_cpus();
			if (attr.max_nr_cpus == 0) {
				errno = EINVAL;
				return NULL;
			}
		}
		break;
	case MEMPOOL_TYPE_GLOBAL:
		/* Override populate policy for global type. */
		if (attr.populate_policy == RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE)
			attr.populate_policy = RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL;
		/* Use a 1-cpu pool for global mempool type. */
		attr.max_nr_cpus = 1;
		break;
	}
	if (!attr.stride)
		attr.stride = RSEQ_MEMPOOL_STRIDE;	/* Use default */
	if (attr.robust_set && !attr.poison_set) {
		attr.poison_set = true;
		attr.poison = DEFAULT_PRIVATE_POISON_VALUE;
	}
	if (item_len > attr.stride || attr.stride < (size_t) rseq_get_page_len() ||
			!is_pow2(attr.stride)) {
		errno = EINVAL;
		return NULL;
	}

	pool = calloc(1, sizeof(struct rseq_mempool));
	if (!pool)
		return NULL;

	memcpy(&pool->attr, &attr, sizeof(attr));
	pthread_mutex_init(&pool->lock, NULL);
	pool->item_len = item_len;
	pool->item_order = order;

	if (rseq_mempool_memfd_ref(pool))
		goto error_alloc;

	pool->range_list = rseq_mempool_range_create(pool);
	if (!pool->range_list)
		goto error_alloc;

	if (pool_name) {
		pool->name = strdup(pool_name);
		if (!pool->name)
			goto error_alloc;
	}
	return pool;

error_alloc:
	rseq_mempool_destroy(pool);
	errno = ENOMEM;
	return NULL;
}
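
/*
 * Sketch of attribute-based pool creation (illustrative; "struct
 * counter" stands for any application type, and error handling is
 * elided):
 *
 *	struct rseq_mempool_attr *attr;
 *	struct rseq_mempool *pool;
 *
 *	attr = rseq_mempool_attr_create();
 *	(void) rseq_mempool_attr_set_robust(attr);
 *	(void) rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
 *	pool = rseq_mempool_create("robust-pool", sizeof(struct counter), attr);
 *	rseq_mempool_attr_destroy(attr);
 *
 * Passing 0 as max_nr_cpus lets rseq_mempool_create() auto-detect the
 * CPU count, as handled in the MEMPOOL_TYPE_PERCPU case above.
 */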

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void set_alloc_slot(struct rseq_mempool *pool, struct rseq_mempool_range *range, size_t item_offset)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Print error if bit is already set. */
	if (bitmap[k] & mask) {
		fprintf(stderr, "%s: Allocator corruption detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] |= mask;
}

static
void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool,
		bool zeroed, void *init_ptr, size_t init_len)
{
	struct rseq_mempool_range *range;
	struct free_list_node *node;
	uintptr_t item_offset;
	void __rseq_percpu *addr;

	if (init_len > pool->item_len) {
		errno = EINVAL;
		return NULL;
	}
	pthread_mutex_lock(&pool->lock);
	/* Get first entry from free list. */
	node = pool->free_list_head;
	if (node != NULL) {
		void *range_base, *ptr;

		ptr = __rseq_free_list_to_percpu_ptr(pool, node);
		range_base = (void *) ((uintptr_t) ptr & (~(pool->attr.stride - 1)));
		range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
		/* Remove node from free list (update head). */
		pool->free_list_head = node->next;
		item_offset = (uintptr_t) (ptr - range_base);
		rseq_percpu_check_poison_item(pool, range, item_offset);
		addr = __rseq_free_list_to_percpu_ptr(pool, node);
		goto end;
	}
	/*
	 * If the most recent range (first in list) does not have any
	 * room left, create a new range and prepend it to the list
	 * head.
	 */
	range = pool->range_list;
	if (range->next_unused + pool->item_len > pool->attr.stride) {
		range = rseq_mempool_range_create(pool);
		if (!range) {
			errno = ENOMEM;
			addr = NULL;
			goto unlock;
		}
		/* Add range to head of list. */
		range->next = pool->range_list;
		pool->range_list = range;
	}
	/* First range in list has room left. */
	item_offset = range->next_unused;
	addr = (void __rseq_percpu *) (range->base + item_offset);
	range->next_unused += pool->item_len;
end:
	set_alloc_slot(pool, range, item_offset);
unlock:
	pthread_mutex_unlock(&pool->lock);
	if (addr) {
		if (zeroed)
			rseq_percpu_zero_item(pool, range, item_offset);
		else if (init_ptr) {
			rseq_percpu_init_item(pool, range, item_offset,
					init_ptr, init_len);
		}
	}
	return addr;
}

void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool)
{
	return __rseq_percpu_malloc(pool, false, NULL, 0);
}

void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool)
{
	return __rseq_percpu_malloc(pool, true, NULL, 0);
}

void __rseq_percpu *rseq_mempool_percpu_malloc_init(struct rseq_mempool *pool,
		void *init_ptr, size_t len)
{
	return __rseq_percpu_malloc(pool, false, init_ptr, len);
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void clear_alloc_slot(struct rseq_mempool *pool, struct rseq_mempool_range *range, size_t item_offset)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Print error if bit is not set. */
	if (!(bitmap[k] & mask)) {
		fprintf(stderr, "%s: Double-free detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset,
			(void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] &= ~mask;
}

void librseq_mempool_percpu_free(void __rseq_percpu *_ptr, size_t stride)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	void *range_base = (void *) (ptr & (~(stride - 1)));
	struct rseq_mempool_range *range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
	struct rseq_mempool *pool = range->pool;
	uintptr_t item_offset = ptr & (stride - 1);
	struct free_list_node *head, *item;

	pthread_mutex_lock(&pool->lock);
	clear_alloc_slot(pool, range, item_offset);
	/* Add ptr to head of free list */
	head = pool->free_list_head;
	if (pool->attr.poison_set)
		rseq_percpu_poison_item(pool, range, item_offset);
	item = __rseq_percpu_to_free_list_ptr(pool, _ptr);
	/*
	 * Setting the next pointer will overwrite the first uintptr_t
	 * poison for either CPU 0 (populate all) or init data (populate
	 * none).
	 */
	item->next = head;
	pool->free_list_head = item;
	pthread_mutex_unlock(&pool->lock);
}
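
/*
 * The pointer arithmetic above relies on stride being a power of two
 * and on ranges being stride-aligned. Illustrative example: with
 * stride = 0x100000 (1 MB) and _ptr = 0x7f0000140080,
 * range_base  = 0x7f0000140080 & ~0xfffff = 0x7f0000100000, and
 * item_offset = 0x7f0000140080 &  0xfffff = 0x40080.
 */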

struct rseq_mempool_set *rseq_mempool_set_create(void)
{
	struct rseq_mempool_set *pool_set;

	pool_set = calloc(1, sizeof(struct rseq_mempool_set));
	if (!pool_set)
		return NULL;
	pthread_mutex_init(&pool_set->lock, NULL);
	return pool_set;
}

int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set)
{
	int order, ret;

	for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
		struct rseq_mempool *pool = pool_set->entries[order];

		if (!pool)
			continue;
		ret = rseq_mempool_destroy(pool);
		if (ret)
			return ret;
		pool_set->entries[order] = NULL;
	}
	pthread_mutex_destroy(&pool_set->lock);
	free(pool_set);
	return 0;
}

/* Ownership of pool is handed over to pool set on success. */
int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set, struct rseq_mempool *pool)
{
	size_t item_order = pool->item_order;
	int ret = 0;

	pthread_mutex_lock(&pool_set->lock);
	if (pool_set->entries[item_order]) {
		errno = EBUSY;
		ret = -1;
		goto end;
	}
	pool_set->entries[pool->item_order] = pool;
end:
	pthread_mutex_unlock(&pool_set->lock);
	return ret;
}

static
void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set,
		void *init_ptr, size_t len, bool zeroed)
{
	int order, min_order = POOL_SET_MIN_ENTRY;
	struct rseq_mempool *pool;
	void __rseq_percpu *addr;

	order = rseq_get_count_order_ulong(len);
	if (order > POOL_SET_MIN_ENTRY)
		min_order = order;
again:
	pthread_mutex_lock(&pool_set->lock);
	/* First smallest present pool where @len fits. */
	for (order = min_order; order < POOL_SET_NR_ENTRIES; order++) {
		pool = pool_set->entries[order];

		if (!pool)
			continue;
		if (pool->item_len >= len)
			goto found;
	}
	pool = NULL;
found:
	pthread_mutex_unlock(&pool_set->lock);
	if (pool) {
		addr = __rseq_percpu_malloc(pool, zeroed, init_ptr, len);
		if (addr == NULL && errno == ENOMEM) {
			/*
			 * If the allocation failed, try again with a
			 * larger pool.
			 */
			min_order = order + 1;
			goto again;
		}
	} else {
		/* Not found. */
		errno = ENOMEM;
		addr = NULL;
	}
	return addr;
}

void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, NULL, len, false);
}

void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, NULL, len, true);
}

void __rseq_percpu *rseq_mempool_set_percpu_malloc_init(struct rseq_mempool_set *pool_set,
		void *init_ptr, size_t len)
{
	/* zeroed=false: apply the init values rather than zeroing. */
	return __rseq_mempool_set_malloc(pool_set, init_ptr, len, false);
}
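
/*
 * Pool set usage sketch (illustrative; rseq_mempool_set_percpu_free()
 * is assumed to be the matching free operation declared in
 * rseq/mempool.h, and error handling is elided):
 *
 *	struct rseq_mempool_set *set;
 *	void __rseq_percpu *p;
 *
 *	set = rseq_mempool_set_create();
 *	(void) rseq_mempool_set_add_pool(set,
 *			rseq_mempool_create("set-8", 8, NULL));
 *	(void) rseq_mempool_set_add_pool(set,
 *			rseq_mempool_create("set-64", 64, NULL));
 *	p = rseq_mempool_set_percpu_zmalloc(set, 24); // served by "set-64"
 *	rseq_mempool_set_percpu_free(p);
 *	rseq_mempool_set_destroy(set);
 */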

struct rseq_mempool_attr *rseq_mempool_attr_create(void)
{
	return calloc(1, sizeof(struct rseq_mempool_attr));
}

void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr)
{
	free(attr);
}

int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
		int (*init_func)(void *priv, void *addr, size_t len, int cpu),
		void *init_priv)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->init_set = true;
	attr->init_func = init_func;
	attr->init_priv = init_priv;
	return 0;
}

int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->robust_set = true;
	return 0;
}

int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
		size_t stride, int max_nr_cpus)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->type = MEMPOOL_TYPE_PERCPU;
	attr->stride = stride;
	attr->max_nr_cpus = max_nr_cpus;
	return 0;
}

int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr,
		size_t stride)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->type = MEMPOOL_TYPE_GLOBAL;
	attr->stride = stride;
	attr->max_nr_cpus = 0;
	return 0;
}

int rseq_mempool_attr_set_max_nr_ranges(struct rseq_mempool_attr *attr,
		unsigned long max_nr_ranges)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->max_nr_ranges = max_nr_ranges;
	return 0;
}

int rseq_mempool_attr_set_poison(struct rseq_mempool_attr *attr,
		uintptr_t poison)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->poison_set = true;
	attr->poison = poison;
	return 0;
}

int rseq_mempool_attr_set_populate_policy(struct rseq_mempool_attr *attr,
		enum rseq_mempool_populate_policy policy)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->populate_policy = policy;
	return 0;
}

int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool)
{
	if (!mempool || mempool->attr.type != MEMPOOL_TYPE_PERCPU) {
		errno = EINVAL;
		return -1;
	}
	return mempool->attr.max_nr_cpus;
}