#endif
#include "rseq-utils.h"
-#include "smp.h"
+#include <rseq/rseq.h>
/*
* rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
int (*munmap_func)(void *priv, void *ptr, size_t len);
void *mmap_priv;
+ bool init_set;
+ int (*init_func)(void *priv, void *addr, size_t len, int cpu);
+ void *init_priv;
+
bool robust_set;
enum mempool_type type;
void *__rseq_pool_range_percpu_ptr(struct rseq_mempool_range *range, int cpu,
uintptr_t item_offset, size_t stride)
{
- /* TODO: Implement multi-ranges support. */
+ /* Address of the item at item_offset inside this range's per-CPU stride. */
return range->base + (stride * cpu) + item_offset;
}
}
#ifdef HAVE_LIBNUMA
-static
-int rseq_mempool_range_init_numa(struct rseq_mempool *pool, struct rseq_mempool_range *range, int numa_flags)
+/*
+ * Move the pages backing [addr, addr + len) to the NUMA node of @cpu,
+ * batching move_pages(2) calls in groups of MOVE_PAGES_BATCH_SIZE.
+ * @numa_flags is passed through to move_pages(2) (e.g. MPOL_MF_MOVE).
+ * Returns 0 on success, -1 with errno set on invalid arguments or
+ * node lookup failure, or the negative move_pages(2) error.
+ */
+int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags)
{
unsigned long nr_pages, page_len;
+ int status[MOVE_PAGES_BATCH_SIZE];
+ int nodes[MOVE_PAGES_BATCH_SIZE];
+ void *pages[MOVE_PAGES_BATCH_SIZE];
long ret;
- int cpu;
- if (!numa_flags)
- return 0;
+ if (!numa_flags) {
+ errno = EINVAL;
+ return -1;
+ }
page_len = rseq_get_page_len();
- nr_pages = pool->attr.stride >> rseq_get_count_order_ulong(page_len);
- for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
+ nr_pages = len >> rseq_get_count_order_ulong(page_len);
- int status[MOVE_PAGES_BATCH_SIZE];
- int nodes[MOVE_PAGES_BATCH_SIZE];
- void *pages[MOVE_PAGES_BATCH_SIZE];
+ nodes[0] = numa_node_of_cpu(cpu);
+ if (nodes[0] < 0)
+ return -1;
- nodes[0] = numa_node_of_cpu(cpu);
- if (nodes[0] < 0)
- continue;
- for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
- nodes[k] = nodes[0];
- }
+ /* Every page in the batch targets the same node. */
+ for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
+ nodes[k] = nodes[0];
+ }
- for (unsigned long page = 0; page < nr_pages;) {
+ for (unsigned long page = 0; page < nr_pages;) {
- size_t max_k = RSEQ_ARRAY_SIZE(pages);
- size_t left = nr_pages - page;
+ size_t max_k = RSEQ_ARRAY_SIZE(pages);
+ size_t left = nr_pages - page;
- if (left < max_k) {
- max_k = left;
- }
+ if (left < max_k) {
+ max_k = left;
+ }
- for (size_t k = 0; k < max_k; ++k, ++page) {
- pages[k] = __rseq_pool_range_percpu_ptr(range, cpu,
- page * page_len, pool->attr.stride);
- status[k] = -EPERM;
- }
+ for (size_t k = 0; k < max_k; ++k, ++page) {
+ pages[k] = addr + (page * page_len);
+ status[k] = -EPERM;
+ }
- ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
+ ret = move_pages(0, max_k, pages, nodes, status, numa_flags);
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
- if (ret > 0) {
- fprintf(stderr, "%lu pages were not migrated\n", ret);
- for (size_t k = 0; k < max_k; ++k) {
- if (status[k] < 0)
- fprintf(stderr,
- "Error while moving page %p to numa node %d: %u\n",
- pages[k], nodes[k], -status[k]);
- }
+ if (ret > 0) {
+ /* ret is long: %ld, not %lu. */
+ fprintf(stderr, "%ld pages were not migrated\n", ret);
+ for (size_t k = 0; k < max_k; ++k) {
+ if (status[k] < 0)
+ /* -status[k] is a signed int (negated errno): %d, not %u. */
+ fprintf(stderr,
+ "Error while moving page %p to numa node %d: %d\n",
+ pages[k], nodes[k], -status[k]);
}
}
}
return 0;
}
-
-int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags)
-{
- struct rseq_mempool_range *range;
- int ret;
-
- if (!numa_flags)
- return 0;
- for (range = pool->ranges; range; range = range->next) {
- ret = rseq_mempool_range_init_numa(pool, range, numa_flags);
- if (ret)
- return ret;
- }
- return 0;
-}
#else
-int rseq_mempool_init_numa(struct rseq_mempool *pool __attribute__((unused)),
+int rseq_mempool_range_init_numa(void *addr __attribute__((unused)),
+ size_t len __attribute__((unused)),
+ int cpu __attribute__((unused)),
int numa_flags __attribute__((unused)))
{
- return 0;
+ /* Built without libnuma: operation unsupported. */
+ errno = ENOSYS;
+ return -1;
}
#endif
void *ptr;
if (len < page_size || alignment < page_size ||
- !is_pow2(len) || !is_pow2(alignment)) {
+ !is_pow2(alignment) || (len & (alignment - 1))) {
errno = EINVAL;
return NULL;
}
if (create_alloc_bitmap(pool, range))
goto error_alloc;
}
+ /*
+ * Run the user-supplied init callback once per CPU stride of the
+ * newly created range; abort range creation on the first failure.
+ */
+ if (pool->attr.init_set) {
+ int cpu;
+
+ for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
+ if (pool->attr.init_func(pool->attr.init_priv,
+ base + (pool->attr.stride * cpu),
+ pool->attr.stride, cpu)) {
+ goto error_alloc;
+ }
+ }
+ }
return range;
error_alloc:
}
if (attr.max_nr_cpus == 0) {
/* Auto-detect */
- attr.max_nr_cpus = get_possible_cpus_array_len();
+ attr.max_nr_cpus = rseq_get_max_nr_cpus();
if (attr.max_nr_cpus == 0) {
errno = EINVAL;
return NULL;
return 0;
}
+/*
+ * Register a per-CPU initialization callback on @attr. The callback
+ * is invoked once per CPU stride at range creation, receiving
+ * @init_priv, the stride base address, the stride length, and the
+ * CPU number; a non-zero return aborts range creation.
+ * Returns 0 on success, -1 with errno set to EINVAL if @attr is NULL.
+ */
+int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
+ int (*init_func)(void *priv, void *addr, size_t len, int cpu),
+ void *init_priv)
+{
+ if (!attr) {
+ errno = EINVAL;
+ return -1;
+ }
+ attr->init_set = true;
+ attr->init_func = init_func;
+ attr->init_priv = init_priv;
+ return 0;
+}
+
int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr)
{
if (!attr) {
attr->max_nr_cpus = 0;
return 0;
}
+
+/*
+ * Query the maximum number of CPUs covered by a per-CPU mempool.
+ * Returns the CPU count on success, -1 with errno set to EINVAL if
+ * @mempool is NULL or is not a per-CPU pool.
+ */
+int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool)
+{
+ if (!mempool || mempool->attr.type != MEMPOOL_TYPE_PERCPU) {
+ errno = EINVAL;
+ return -1;
+ }
+ return mempool->attr.max_nr_cpus;
+}