#endif
#include "rseq-utils.h"
+#include <rseq/rseq.h>
/*
* rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
 * This per-CPU memory allocator provides CPU-Local Storage: each
 * allocated item has one instance per CPU, at a fixed stride from the
 * returned address.
*/
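+/*
+ * Example usage (a minimal sketch: rseq_percpu_ptr() and the
+ * rseq_mempool_percpu_free() wrapper are assumed to be provided by the
+ * public mempool header; error handling omitted):
+ *
+ *	struct rseq_mempool_attr *attr;
+ *	struct my_data __rseq_percpu *p;
+ *	struct rseq_mempool *pool;
+ *
+ *	attr = rseq_mempool_attr_create();
+ *	rseq_mempool_attr_set_percpu(attr, 0, 0);	// default stride, auto-detect CPUs
+ *	pool = rseq_mempool_create("example", sizeof(struct my_data), attr);
+ *	rseq_mempool_attr_destroy(attr);
+ *
+ *	p = (struct my_data __rseq_percpu *) rseq_mempool_percpu_zmalloc(pool);
+ *	rseq_percpu_ptr(p, cpu)->counter++;	// CPU-local instance for "cpu"
+ *	rseq_mempool_percpu_free(p);
+ *	(void) rseq_mempool_destroy(pool);
+ */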
-/*
- * Use high bits of per-CPU addresses to index the pool.
- * This leaves the low bits of available to the application for pointer
- * tagging (based on next power of 2 alignment of the allocations).
- */
-#if RSEQ_BITS_PER_LONG == 64
-# define POOL_INDEX_BITS 16
-#else
-# define POOL_INDEX_BITS 8
-#endif
-#define MAX_NR_POOLS (1UL << POOL_INDEX_BITS)
-#define POOL_INDEX_SHIFT (RSEQ_BITS_PER_LONG - POOL_INDEX_BITS)
-#define MAX_POOL_LEN (1UL << POOL_INDEX_SHIFT)
-#define MAX_POOL_LEN_MASK (MAX_POOL_LEN - 1)
-
-#define POOL_SET_NR_ENTRIES POOL_INDEX_SHIFT
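+/* One pool set entry per possible item size order, up to the word size. */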
+#define POOL_SET_NR_ENTRIES RSEQ_BITS_PER_LONG
/*
* Smallest allocation should hold enough space for a free list pointer.
#define MOVE_PAGES_BATCH_SIZE 4096
-#define RANGE_HEADER_OFFSET sizeof(struct rseq_percpu_pool_range)
+#define RANGE_HEADER_OFFSET sizeof(struct rseq_mempool_range)
struct free_list_node;
struct free_list_node *next;
};
-/* This lock protects pool create/destroy. */
-static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
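+/*
+ * A mempool is either per-CPU (one stride-sized address range per CPU)
+ * or global (implemented as a single-range, 1-cpu pool).
+ */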
+enum mempool_type {
+ MEMPOOL_TYPE_GLOBAL = 0, /* Default */
+ MEMPOOL_TYPE_PERCPU = 1,
+};
-struct rseq_pool_attr {
+struct rseq_mempool_attr {
bool mmap_set;
void *(*mmap_func)(void *priv, size_t len);
int (*munmap_func)(void *priv, void *ptr, size_t len);
void *mmap_priv;
+ bool init_set;
+ int (*init_func)(void *priv, void *addr, size_t len, int cpu);
+ void *init_priv;
+
bool robust_set;
+
+ enum mempool_type type;
+ size_t stride;
+ int max_nr_cpus;
};
-struct rseq_percpu_pool_range;
+struct rseq_mempool_range;
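+/*
+ * The range struct itself lives in a header page located one page
+ * before the stride-aligned base of the per-CPU data (see
+ * RANGE_HEADER_OFFSET).
+ */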
-struct rseq_percpu_pool_range {
- struct rseq_percpu_pool_range *next;
- struct rseq_percpu_pool *pool; /* Backward ref. to container pool. */
+struct rseq_mempool_range {
+ struct rseq_mempool_range *next;
+ struct rseq_mempool *pool; /* Backward ref. to container pool. */
void *header;
void *base;
size_t next_unused;
unsigned long *alloc_bitmap;
};
-struct rseq_percpu_pool {
+struct rseq_mempool {
/* Linked-list of ranges. */
- struct rseq_percpu_pool_range *ranges;
+ struct rseq_mempool_range *ranges;
- unsigned int index;
size_t item_len;
- size_t percpu_stride;
int item_order;
- int max_nr_cpus;
/*
* The free list chains freed items on the CPU 0 address range.
/* This lock protects allocation/free within the pool. */
pthread_mutex_t lock;
- struct rseq_pool_attr attr;
+ struct rseq_mempool_attr attr;
char *name;
};
-//TODO: the array of pools should grow dynamically on create.
-static struct rseq_percpu_pool rseq_percpu_pool[MAX_NR_POOLS];
-
/*
* Pool set entries are indexed by item_len rounded to the next power of
* 2. A pool set can contain NULL pool entries, in which case the next
* large enough entry will be used for allocation.
*/
-struct rseq_percpu_pool_set {
+struct rseq_mempool_set {
/* This lock protects add vs malloc/zmalloc within the pool set. */
pthread_mutex_t lock;
- struct rseq_percpu_pool *entries[POOL_SET_NR_ENTRIES];
+ struct rseq_mempool *entries[POOL_SET_NR_ENTRIES];
};
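+/*
+ * Each CPU owns one stride-sized slice of a range, so CPU @cpu's
+ * instance of an item lives at base + (stride * cpu) + item_offset.
+ */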
static
-void *__rseq_pool_percpu_ptr(struct rseq_percpu_pool *pool, int cpu,
+void *__rseq_pool_range_percpu_ptr(struct rseq_mempool_range *range, int cpu,
uintptr_t item_offset, size_t stride)
{
- /* TODO: Implement multi-ranges support. */
- return pool->ranges->base + (stride * cpu) + item_offset;
+ return range->base + (stride * cpu) + item_offset;
}
static
-void rseq_percpu_zero_item(struct rseq_percpu_pool *pool, uintptr_t item_offset)
+void rseq_percpu_zero_item(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range, uintptr_t item_offset)
{
int i;
- for (i = 0; i < pool->max_nr_cpus; i++) {
- char *p = __rseq_pool_percpu_ptr(pool, i,
- item_offset, pool->percpu_stride);
+ for (i = 0; i < pool->attr.max_nr_cpus; i++) {
+ char *p = __rseq_pool_range_percpu_ptr(range, i,
+ item_offset, pool->attr.stride);
memset(p, 0, pool->item_len);
}
}
-//TODO: this will need to be reimplemented for ranges,
-//which cannot use __rseq_pool_percpu_ptr.
-#if 0 //#ifdef HAVE_LIBNUMA
-static
-int rseq_percpu_pool_range_init_numa(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range, int numa_flags)
+#ifdef HAVE_LIBNUMA
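+/*
+ * Move the pages backing [addr, addr + len) to the NUMA node of @cpu,
+ * batching move_pages(2) calls by MOVE_PAGES_BATCH_SIZE pages.
+ */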
+int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags)
{
unsigned long nr_pages, page_len;
+ int status[MOVE_PAGES_BATCH_SIZE];
+ int nodes[MOVE_PAGES_BATCH_SIZE];
+ void *pages[MOVE_PAGES_BATCH_SIZE];
long ret;
- int cpu;
- if (!numa_flags)
- return 0;
+ if (!numa_flags) {
+ errno = EINVAL;
+ return -1;
+ }
page_len = rseq_get_page_len();
- nr_pages = pool->percpu_stride >> rseq_get_count_order_ulong(page_len);
- for (cpu = 0; cpu < pool->max_nr_cpus; cpu++) {
+ nr_pages = len >> rseq_get_count_order_ulong(page_len);
- int status[MOVE_PAGES_BATCH_SIZE];
- int nodes[MOVE_PAGES_BATCH_SIZE];
- void *pages[MOVE_PAGES_BATCH_SIZE];
+ nodes[0] = numa_node_of_cpu(cpu);
+ if (nodes[0] < 0)
+ return -1;
- nodes[0] = numa_node_of_cpu(cpu);
- for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
- nodes[k] = nodes[0];
- }
+ for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
+ nodes[k] = nodes[0];
+ }
- for (unsigned long page = 0; page < nr_pages;) {
+ for (unsigned long page = 0; page < nr_pages;) {
- size_t max_k = RSEQ_ARRAY_SIZE(pages);
- size_t left = nr_pages - page;
+ size_t max_k = RSEQ_ARRAY_SIZE(pages);
+ size_t left = nr_pages - page;
- if (left < max_k) {
- max_k = left;
- }
+ if (left < max_k) {
+ max_k = left;
+ }
- for (size_t k = 0; k < max_k; ++k, ++page) {
- pages[k] = __rseq_pool_percpu_ptr(pool, cpu, page * page_len);
- status[k] = -EPERM;
- }
+ for (size_t k = 0; k < max_k; ++k, ++page) {
+ pages[k] = addr + (page * page_len);
+ status[k] = -EPERM;
+ }
- ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
+ ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
- if (ret > 0) {
- fprintf(stderr, "%lu pages were not migrated\n", ret);
- for (size_t k = 0; k < max_k; ++k) {
- if (status[k] < 0)
- fprintf(stderr,
- "Error while moving page %p to numa node %d: %u\n",
- pages[k], nodes[k], -status[k]);
- }
+ if (ret > 0) {
+			fprintf(stderr, "%ld pages were not migrated\n", ret);
+ for (size_t k = 0; k < max_k; ++k) {
+ if (status[k] < 0)
+ fprintf(stderr,
+					"Error while moving page %p to numa node %d: %d\n",
+ pages[k], nodes[k], -status[k]);
}
}
}
return 0;
}
-
-int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags)
-{
- struct rseq_percpu_pool_range *range;
- int ret;
-
- if (!numa_flags)
- return 0;
- for (range = pool->ranges; range; range = range->next) {
- ret = rseq_percpu_pool_range_init_numa(pool, range, numa_flags);
- if (ret)
- return ret;
- }
- return 0;
-}
#else
-int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool __attribute__((unused)),
+int rseq_mempool_range_init_numa(void *addr __attribute__((unused)),
+ size_t len __attribute__((unused)),
+ int cpu __attribute__((unused)),
int numa_flags __attribute__((unused)))
{
- return 0;
+ errno = ENOSYS;
+ return -1;
}
#endif
}
static
-int create_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
+int create_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
size_t count;
- count = ((pool->percpu_stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
+ count = ((pool->attr.stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
/*
* Not being able to create the validation bitmap is an error
}
static
-const char *get_pool_name(const struct rseq_percpu_pool *pool)
+const char *get_pool_name(const struct rseq_mempool *pool)
{
return pool->name ? : "<anonymous>";
}
static
-bool addr_in_pool(const struct rseq_percpu_pool *pool, void *addr)
+bool addr_in_pool(const struct rseq_mempool *pool, void *addr)
{
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
for (range = pool->ranges; range; range = range->next) {
if (addr >= range->base && addr < range->base + range->next_unused)
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void check_free_list(const struct rseq_percpu_pool *pool)
+void check_free_list(const struct rseq_mempool *pool)
{
size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
max_list_traversal = 0, traversal_iteration = 0;
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
if (!pool->attr.robust_set)
return;
for (range = pool->ranges; range; range = range->next) {
- total_item += pool->percpu_stride >> pool->item_order;
- total_never_allocated += (pool->percpu_stride - range->next_unused) >> pool->item_order;
+ total_item += pool->attr.stride >> pool->item_order;
+ total_never_allocated += (pool->attr.stride - range->next_unused) >> pool->item_order;
}
max_list_traversal = total_item - total_never_allocated;
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void destroy_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
+void destroy_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
unsigned long *bitmap = range->alloc_bitmap;
size_t count, total_leaks = 0;
if (!bitmap)
return;
- count = ((pool->percpu_stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
+ count = ((pool->attr.stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
/* Assert that all items in the pool were freed. */
for (size_t k = 0; k < count; ++k)
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-int rseq_percpu_pool_range_destroy(struct rseq_percpu_pool *pool,
- struct rseq_percpu_pool_range *range)
+int rseq_mempool_range_destroy(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range)
{
destroy_alloc_bitmap(pool, range);
/* range is a header located one page before the aligned mapping. */
return pool->attr.munmap_func(pool->attr.mmap_priv, range->header,
- (pool->percpu_stride * pool->max_nr_cpus) + rseq_get_page_len());
+ (pool->attr.stride * pool->attr.max_nr_cpus) + rseq_get_page_len());
}
/*
* @pre_header before the mapping.
*/
static
-void *aligned_mmap_anonymous(struct rseq_percpu_pool *pool,
+void *aligned_mmap_anonymous(struct rseq_mempool *pool,
size_t page_size, size_t len, size_t alignment,
void **pre_header, size_t pre_header_len)
{
void *ptr;
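+	/*
+	 * len is no longer required to be a power of 2: a per-CPU range
+	 * maps stride * max_nr_cpus bytes aligned on stride, so only the
+	 * alignment must be a power of 2 and len a multiple of it.
+	 */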
if (len < page_size || alignment < page_size ||
- !is_pow2(len) || !is_pow2(alignment)) {
+ !is_pow2(alignment) || (len & (alignment - 1))) {
errno = EINVAL;
return NULL;
}
}
static
-struct rseq_percpu_pool_range *rseq_percpu_pool_range_create(struct rseq_percpu_pool *pool)
+struct rseq_mempool_range *rseq_mempool_range_create(struct rseq_mempool *pool)
{
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
unsigned long page_size;
void *header;
void *base;
page_size = rseq_get_page_len();
base = aligned_mmap_anonymous(pool, page_size,
- pool->percpu_stride * pool->max_nr_cpus,
- pool->percpu_stride,
+ pool->attr.stride * pool->attr.max_nr_cpus,
+ pool->attr.stride,
&header, page_size);
if (!base)
return NULL;
- range = (struct rseq_percpu_pool_range *) (base - RANGE_HEADER_OFFSET);
+ range = (struct rseq_mempool_range *) (base - RANGE_HEADER_OFFSET);
range->pool = pool;
range->base = base;
range->header = header;
if (create_alloc_bitmap(pool, range))
goto error_alloc;
}
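+	/*
+	 * Invoke the caller-provided init callback (registered with
+	 * rseq_mempool_attr_set_init()) once per CPU on that CPU's
+	 * address range, e.g. to apply a memory policy or prepopulate
+	 * content.
+	 */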
+ if (pool->attr.init_set) {
+ int cpu;
+
+ for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
+ if (pool->attr.init_func(pool->attr.init_priv,
+ base + (pool->attr.stride * cpu),
+ pool->attr.stride, cpu)) {
+ goto error_alloc;
+ }
+ }
+ }
return range;
error_alloc:
- (void) rseq_percpu_pool_range_destroy(pool, range);
+ (void) rseq_mempool_range_destroy(pool, range);
return NULL;
}
-/* Always inline for __builtin_return_address(0). */
-static inline __attribute__((always_inline))
-int __rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
+int rseq_mempool_destroy(struct rseq_mempool *pool)
{
- struct rseq_percpu_pool_range *range, *next_range;
+ struct rseq_mempool_range *range, *next_range;
int ret = 0;
- if (!pool->ranges) {
- errno = ENOENT;
- ret = -1;
- goto end;
- }
+ if (!pool)
+ return 0;
check_free_list(pool);
/* Iteration safe against removal. */
for (range = pool->ranges; range && (next_range = range->next, 1); range = next_range) {
- if (rseq_percpu_pool_range_destroy(pool, range))
+ if (rseq_mempool_range_destroy(pool, range))
goto end;
/* Update list head to keep list coherent in case of partial failure. */
pool->ranges = next_range;
return ret;
}
-int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
+struct rseq_mempool *rseq_mempool_create(const char *pool_name,
+ size_t item_len, const struct rseq_mempool_attr *_attr)
{
- int ret;
-
- pthread_mutex_lock(&pool_lock);
- ret = __rseq_percpu_pool_destroy(pool);
- pthread_mutex_unlock(&pool_lock);
- return ret;
-}
-
-struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
- size_t item_len, size_t percpu_stride, int max_nr_cpus,
- const struct rseq_pool_attr *_attr)
-{
- struct rseq_percpu_pool *pool;
- struct rseq_pool_attr attr = {};
- unsigned int i;
+ struct rseq_mempool *pool;
+ struct rseq_mempool_attr attr = {};
int order;
/* Make sure each item is large enough to contain free list pointers. */
}
item_len = 1UL << order;
- if (!percpu_stride)
- percpu_stride = RSEQ_PERCPU_STRIDE; /* Use default */
-
- if (max_nr_cpus < 0 || item_len > percpu_stride ||
- percpu_stride > (UINTPTR_MAX >> POOL_INDEX_BITS) ||
- percpu_stride < (size_t) rseq_get_page_len() ||
- !is_pow2(percpu_stride)) {
- errno = EINVAL;
- return NULL;
- }
-
if (_attr)
memcpy(&attr, _attr, sizeof(attr));
if (!attr.mmap_set) {
attr.mmap_priv = NULL;
}
- pthread_mutex_lock(&pool_lock);
- /* Linear scan in array of pools to find empty spot. */
- for (i = FIRST_POOL; i < MAX_NR_POOLS; i++) {
- pool = &rseq_percpu_pool[i];
- if (!pool->ranges)
- goto found_empty;
+ switch (attr.type) {
+ case MEMPOOL_TYPE_PERCPU:
+ if (attr.max_nr_cpus < 0) {
+ errno = EINVAL;
+ return NULL;
+ }
+ if (attr.max_nr_cpus == 0) {
+ /* Auto-detect */
+ attr.max_nr_cpus = rseq_get_max_nr_cpus();
+ if (attr.max_nr_cpus == 0) {
+ errno = EINVAL;
+ return NULL;
+ }
+ }
+ break;
+ case MEMPOOL_TYPE_GLOBAL:
+ /* Use a 1-cpu pool for global mempool type. */
+ attr.max_nr_cpus = 1;
+ break;
+ }
+ if (!attr.stride)
+ attr.stride = RSEQ_MEMPOOL_STRIDE; /* Use default */
+ if (item_len > attr.stride || attr.stride < (size_t) rseq_get_page_len() ||
+ !is_pow2(attr.stride)) {
+ errno = EINVAL;
+ return NULL;
}
- errno = ENOMEM;
- pool = NULL;
- goto end;
-found_empty:
+ pool = calloc(1, sizeof(struct rseq_mempool));
+ if (!pool)
+ return NULL;
+
memcpy(&pool->attr, &attr, sizeof(attr));
pthread_mutex_init(&pool->lock, NULL);
- pool->percpu_stride = percpu_stride;
- pool->max_nr_cpus = max_nr_cpus;
- pool->index = i;
pool->item_len = item_len;
pool->item_order = order;
//TODO: implement multi-range support.
- pool->ranges = rseq_percpu_pool_range_create(pool);
+ pool->ranges = rseq_mempool_range_create(pool);
if (!pool->ranges)
goto error_alloc;
if (!pool->name)
goto error_alloc;
}
-end:
- pthread_mutex_unlock(&pool_lock);
return pool;
error_alloc:
- __rseq_percpu_pool_destroy(pool);
- pthread_mutex_unlock(&pool_lock);
+ rseq_mempool_destroy(pool);
errno = ENOMEM;
return NULL;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void set_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
+void set_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
{
unsigned long *bitmap = pool->ranges->alloc_bitmap;
size_t item_index = item_offset >> pool->item_order;
}
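+/*
+ * Allocation first reuses an item from the free list when one is
+ * available, and otherwise carves a new item out of the still-unused
+ * tail (next_unused) of the range. The returned pointer addresses the
+ * CPU 0 instance; the other CPUs' instances sit at stride offsets
+ * from it.
+ */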
static
-void __rseq_percpu *__rseq_percpu_malloc(struct rseq_percpu_pool *pool, bool zeroed)
+void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool, bool zeroed)
{
struct free_list_node *node;
uintptr_t item_offset;
addr = (void __rseq_percpu *) (pool->ranges->base + item_offset);
goto end;
}
- if (pool->ranges->next_unused + pool->item_len > pool->percpu_stride) {
+ if (pool->ranges->next_unused + pool->item_len > pool->attr.stride) {
errno = ENOMEM;
addr = NULL;
goto end;
set_alloc_slot(pool, item_offset);
pthread_mutex_unlock(&pool->lock);
if (zeroed && addr)
- rseq_percpu_zero_item(pool, item_offset);
+ rseq_percpu_zero_item(pool, pool->ranges, item_offset);
return addr;
}
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool)
+void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool)
{
return __rseq_percpu_malloc(pool, false);
}
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool)
+void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool)
{
return __rseq_percpu_malloc(pool, true);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void clear_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
+void clear_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
{
unsigned long *bitmap = pool->ranges->alloc_bitmap;
size_t item_index = item_offset >> pool->item_order;
bitmap[k] &= ~mask;
}
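+/*
+ * The owning range is recovered from the freed pointer itself: items
+ * live in the stride-aligned CPU 0 address range, so clearing the low
+ * stride bits yields the range base, and the range header sits
+ * RANGE_HEADER_OFFSET bytes before that base.
+ */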
-void __rseq_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
+void librseq_mempool_percpu_free(void __rseq_percpu *_ptr, size_t stride)
{
uintptr_t ptr = (uintptr_t) _ptr;
- void *range_base = (void *) (ptr & (~(percpu_stride - 1)));
- struct rseq_percpu_pool_range *range = (struct rseq_percpu_pool_range *) (range_base - RANGE_HEADER_OFFSET);
- struct rseq_percpu_pool *pool = range->pool;
- uintptr_t item_offset = ptr & (percpu_stride - 1);
+ void *range_base = (void *) (ptr & (~(stride - 1)));
+ struct rseq_mempool_range *range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
+ struct rseq_mempool *pool = range->pool;
+ uintptr_t item_offset = ptr & (stride - 1);
struct free_list_node *head, *item;
pthread_mutex_lock(&pool->lock);
pthread_mutex_unlock(&pool->lock);
}
-struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void)
+struct rseq_mempool_set *rseq_mempool_set_create(void)
{
- struct rseq_percpu_pool_set *pool_set;
+ struct rseq_mempool_set *pool_set;
- pool_set = calloc(1, sizeof(struct rseq_percpu_pool_set));
+ pool_set = calloc(1, sizeof(struct rseq_mempool_set));
if (!pool_set)
return NULL;
pthread_mutex_init(&pool_set->lock, NULL);
return pool_set;
}
-int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set)
+int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set)
{
int order, ret;
for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
- struct rseq_percpu_pool *pool = pool_set->entries[order];
+ struct rseq_mempool *pool = pool_set->entries[order];
if (!pool)
continue;
- ret = rseq_percpu_pool_destroy(pool);
+ ret = rseq_mempool_destroy(pool);
if (ret)
return ret;
pool_set->entries[order] = NULL;
}
/* Ownership of pool is handed over to pool set on success. */
-int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set, struct rseq_percpu_pool *pool)
+int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set, struct rseq_mempool *pool)
{
size_t item_order = pool->item_order;
int ret = 0;
}
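+/*
+ * Look up the pool set entry indexed by the order of the requested
+ * length; NULL entries fall back to the next large enough pool.
+ */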
static
-void __rseq_percpu *__rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len, bool zeroed)
+void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len, bool zeroed)
{
int order, min_order = POOL_SET_MIN_ENTRY;
- struct rseq_percpu_pool *pool;
+ struct rseq_mempool *pool;
void __rseq_percpu *addr;
order = rseq_get_count_order_ulong(len);
return addr;
}
-void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_percpu_pool_set_malloc(pool_set, len, false);
+ return __rseq_mempool_set_malloc(pool_set, len, false);
}
-void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_percpu_pool_set_malloc(pool_set, len, true);
+ return __rseq_mempool_set_malloc(pool_set, len, true);
}
-struct rseq_pool_attr *rseq_pool_attr_create(void)
+struct rseq_mempool_attr *rseq_mempool_attr_create(void)
{
- return calloc(1, sizeof(struct rseq_pool_attr));
+ return calloc(1, sizeof(struct rseq_mempool_attr));
}
-void rseq_pool_attr_destroy(struct rseq_pool_attr *attr)
+void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr)
{
free(attr);
}
-int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
+int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
void *(*mmap_func)(void *priv, size_t len),
int (*munmap_func)(void *priv, void *ptr, size_t len),
void *mmap_priv)
return 0;
}
-int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr)
+int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
+ int (*init_func)(void *priv, void *addr, size_t len, int cpu),
+ void *init_priv)
+{
+ if (!attr) {
+ errno = EINVAL;
+ return -1;
+ }
+ attr->init_set = true;
+ attr->init_func = init_func;
+ attr->init_priv = init_priv;
+ return 0;
+}
+
+int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr)
{
if (!attr) {
errno = EINVAL;
attr->robust_set = true;
return 0;
}
+
+int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
+ size_t stride, int max_nr_cpus)
+{
+ if (!attr) {
+ errno = EINVAL;
+ return -1;
+ }
+ attr->type = MEMPOOL_TYPE_PERCPU;
+ attr->stride = stride;
+ attr->max_nr_cpus = max_nr_cpus;
+ return 0;
+}
+
+int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr,
+ size_t stride)
+{
+ if (!attr) {
+ errno = EINVAL;
+ return -1;
+ }
+ attr->type = MEMPOOL_TYPE_GLOBAL;
+ attr->stride = stride;
+ attr->max_nr_cpus = 0;
+ return 0;
+}
+
+int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool)
+{
+ if (!mempool || mempool->attr.type != MEMPOOL_TYPE_PERCPU) {
+ errno = EINVAL;
+ return -1;
+ }
+ return mempool->attr.max_nr_cpus;
+}