// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+// SPDX-FileCopyrightText: 2024 Olivier Dion <odion@efficios.com>
#include <rseq/mempool.h>
#include <sys/mman.h>
#define POOL_SET_NR_ENTRIES RSEQ_BITS_PER_LONG
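+/*
+ * The range header spans two pages: a canary page used to detect
+ * destroy-after-fork of a COW_INIT pool, followed by a page holding
+ * the struct rseq_mempool_range at its very end.
+ */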
+#define POOL_HEADER_NR_PAGES 2
+
/*
* Smallest allocation should hold enough space for a free list pointer.
*/
#define RANGE_HEADER_OFFSET sizeof(struct rseq_mempool_range)
#if RSEQ_BITS_PER_LONG == 64
-# define DEFAULT_PRIVATE_POISON_VALUE 0x5555555555555555ULL
+# define DEFAULT_COW_INIT_POISON_VALUE 0x5555555555555555ULL
#else
-# define DEFAULT_PRIVATE_POISON_VALUE 0x55555555UL
+# define DEFAULT_COW_INIT_POISON_VALUE 0x55555555UL
#endif
+/*
+ * Define the default COW_ZERO poison value as zero to prevent useless
+ * COW page allocation when poison values are written to freed items.
+ */
+#define DEFAULT_COW_ZERO_POISON_VALUE 0x0
+
struct free_list_node;
struct free_list_node {
/*
* Memory layout of a mempool range:
- * - Header page (contains struct rseq_mempool_range at the very end),
+ * - Canary header page (for detection of destroy-after-fork of
+ * COW_INIT pool),
+ * - Header page (contains struct rseq_mempool_range at the
+ * very end),
* - Base of the per-cpu data, starting with CPU 0.
- * Aliases with free-list for non-robust populate all pool.
+ * Aliases with free-list for non-robust COW_ZERO pool.
* - CPU 1,
* ...
* - CPU max_nr_cpus - 1
- * - init values (unpopulated for RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL).
- * Aliases with free-list for non-robust populate none pool.
+ * - init values (only allocated for COW_INIT pool).
+ * Aliases with free-list for non-robust COW_INIT pool.
* - free list (for robust pool).
*
* The free list aliases the CPU 0 memory area for non-robust
- * populate all pools. It aliases with init values for
- * non-robust populate none pools. It is located immediately
- * after the init values for robust pools.
+ * COW_ZERO pools. It aliases with init values for non-robust
+ * COW_INIT pools. It is located immediately after the init
+ * values for robust pools.
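+ *
+ * Layout sketch for a robust COW_INIT pool (one box per
+ * pool->attr.stride, except the two header pages):
+ *
+ *   [canary page][header page: range struct at end]
+ *   [cpu 0][cpu 1]...[cpu max_nr_cpus - 1][init values][free list]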
*/
void *header;
void *base;
/*
- * The init values contains malloc_init/zmalloc values.
- * Pointer is NULL for RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL.
+ * The init values contain malloc_init/zmalloc values.
+ * Pointer is NULL for RSEQ_MEMPOOL_POPULATE_COW_ZERO.
*/
void *init;
size_t next_unused;
int item_order;
/*
- * The free list chains freed items on the CPU 0 address range.
- * We should rethink this decision if false sharing between
- * malloc/free from other CPUs and data accesses from CPU 0
- * becomes an issue. This is a NULL-terminated singly-linked
- * list.
+ * COW_INIT non-robust pools:
+ * The free list chains freed items on the init
+ * values address range.
+ *
+ * COW_ZERO non-robust pools:
+ * The free list chains freed items on the CPU 0
+ * address range. We should rethink this
+ * decision if false sharing between malloc/free
+ * from other CPUs and data accesses from CPU 0
+ * becomes an issue.
+ *
+ * Robust pools: The free list chains freed items in the
+ * address range dedicated to the free list.
+ *
+ * This is a NULL-terminated singly-linked list.
*/
struct free_list_node *free_list_head;
/* Skip cpus. */
p -= pool->attr.max_nr_cpus * pool->attr.stride;
/* Skip init values */
- if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
p -= pool->attr.stride;
} else {
- /* Populate none free list is in init values */
- if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
+ /* COW_INIT free list is in init values */
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
p -= pool->attr.max_nr_cpus * pool->attr.stride;
}
return p;
/* Skip cpus. */
p += pool->attr.max_nr_cpus * pool->attr.stride;
/* Skip init values */
- if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
p += pool->attr.stride;
} else {
- /* Populate none free list is in init values */
- if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
+ /* COW_INIT free list is in init values */
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
p += pool->attr.max_nr_cpus * pool->attr.stride;
}
return (struct free_list_node *) p;
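+/*
+ * The two pointer conversions above are exact inverses: they apply
+ * the same stride offsets in opposite directions to translate
+ * between an item's per-cpu address and the location of its free
+ * list node.
+ */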
* write to the page. This eliminates useless COW over
* the zero page just for overwriting it with zeroes.
*
- * This means zmalloc() in populate all policy pool do
+ * This means zmalloc() in COW_ZERO policy pools does
* not trigger COW for CPUs which are not actively
* writing to the pool. This is however not the case for
- * malloc_init() in populate-all pools if it populates
- * COW of the page.
+ * malloc_init() in COW_ZERO pools if it writes non-zero
+ * content, which triggers COW of the page.
*
* It is recommended to use zero as poison value for
- * populate-all pools to eliminate COW due to writing
- * poison to unused CPU memory.
+ * COW_ZERO pools to eliminate COW due to writing
+ * poison to CPU memory still backed by the zero page.
*/
if (rseq_cmp_item(p, pool->item_len, poison, NULL) == 0)
continue;
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void check_free_list(const struct rseq_mempool *pool)
+void check_free_list(const struct rseq_mempool *pool, bool mapping_accessible)
{
size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
max_list_traversal = 0, traversal_iteration = 0;
struct rseq_mempool_range *range;
- if (!pool->attr.robust_set)
+ if (!pool->attr.robust_set || !mapping_accessible)
return;
for (range = pool->range_list; range; range = range->next) {
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void check_pool_poison(const struct rseq_mempool *pool)
+void check_pool_poison(const struct rseq_mempool *pool, bool mapping_accessible)
{
struct rseq_mempool_range *range;
- if (!pool->attr.robust_set)
+ if (!pool->attr.robust_set || !mapping_accessible)
return;
for (range = pool->range_list; range; range = range->next)
check_range_poison(pool, range);
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int rseq_mempool_range_destroy(struct rseq_mempool *pool,
- struct rseq_mempool_range *range)
+ struct rseq_mempool_range *range,
+ bool mapping_accessible)
{
destroy_alloc_bitmap(pool, range);
-
- /* range is a header located one page before the aligned mapping. */
+ if (!mapping_accessible) {
+ /*
+ * Only the header pages remain mapped in the child
+ * process (the rest of the range is MADV_DONTFORK).
+ */
+ return munmap(range->header, POOL_HEADER_NR_PAGES * rseq_get_page_len());
+ }
return munmap(range->mmap_addr, range->mmap_len);
}
goto end;
}
if (ftruncate(fd, (off_t) init_len)) {
+ if (close(fd))
+ perror("close");
fd = -1;
goto end;
}
static
void rseq_memfd_close(int fd)
{
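+	/* No-op for negative fd: simplifies caller error paths. */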
+ if (fd < 0)
+ return;
if (close(fd))
perror("close");
}
void *header;
void *base;
size_t range_len; /* Range len excludes header. */
+ size_t header_len;
+ int memfd = -1;
if (pool->attr.max_nr_ranges &&
pool->nr_ranges >= pool->attr.max_nr_ranges) {
}
page_size = rseq_get_page_len();
+ header_len = POOL_HEADER_NR_PAGES * page_size;
range_len = pool->attr.stride * pool->attr.max_nr_cpus;
- if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL)
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
range_len += pool->attr.stride; /* init values */
if (pool->attr.robust_set)
- range_len += pool->attr.stride; /* free list */
+ range_len += pool->attr.stride; /* dedicated free list */
base = aligned_mmap_anonymous(page_size, range_len,
- pool->attr.stride, &header, page_size);
+ pool->attr.stride, &header, header_len);
if (!base)
return NULL;
range = (struct rseq_mempool_range *) (base - RANGE_HEADER_OFFSET);
range->header = header;
range->base = base;
range->mmap_addr = header;
- range->mmap_len = page_size + range_len;
-
- if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL) {
- int memfd;
+ range->mmap_len = header_len + range_len;
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT) {
range->init = base + (pool->attr.stride * pool->attr.max_nr_cpus);
/* Populate init values pages from memfd */
memfd = rseq_memfd_create_init(pool->name, pool->attr.stride);
if (memfd < 0)
goto error_alloc;
if (mmap(range->init, pool->attr.stride, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED, memfd, 0) != (void *) range->init) {
+ MAP_SHARED | MAP_FIXED, memfd, 0) != (void *) range->init)
goto error_alloc;
- }
assert(pool->attr.type == MEMPOOL_TYPE_PERCPU);
/*
* Map per-cpu memory as private COW mappings of init values.
size_t len = pool->attr.stride;
if (mmap(p, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
- memfd, 0) != (void *) p) {
+ memfd, 0) != (void *) p)
goto error_alloc;
- }
}
}
+ /*
+ * The init values shared mapping should not be shared
+ * with child processes across fork. Prevent the
+ * whole mapping from being used across fork.
+ */
+ if (madvise(base, range_len, MADV_DONTFORK))
+ goto error_alloc;
+
+ /*
+ * Write 0x1 in the first byte of the first header page,
+ * which will be WIPEONFORK (and thus cleared) in child
+ * processes. Used to find out whether pool destroy is called
+ * from a child process after fork.
+ */
+ *((char *) header) = 0x1;
+ if (madvise(header, page_size, MADV_WIPEONFORK))
+ goto error_alloc;
+
+ /*
+ * The second header page contains the struct
+ * rseq_mempool_range, which is needed by pool destroy.
+ * Leave this anonymous page populated (COW) in child
+ * processes.
+ */
rseq_memfd_close(memfd);
+ memfd = -1;
}
if (pool->attr.robust_set) {
return range;
error_alloc:
- (void) rseq_mempool_range_destroy(pool, range);
+ rseq_memfd_close(memfd);
+ (void) rseq_mempool_range_destroy(pool, range, true);
return NULL;
}
+static
+bool pool_mappings_accessible(struct rseq_mempool *pool)
+{
+ struct rseq_mempool_range *range;
+ size_t page_size;
+ char *addr;
+
+ if (pool->attr.populate_policy != RSEQ_MEMPOOL_POPULATE_COW_INIT)
+ return true;
+ range = pool->range_list;
+ if (!range)
+ return true;
+ page_size = rseq_get_page_len();
+ /*
+ * The first header page is one page before the page containing
+ * the range structure.
+ */
+ addr = (char *) ((uintptr_t) range & ~(page_size - 1)) - page_size;
+ /*
+ * Look for the 0x1 first-byte marker in the first header page.
+ */
+ if (*addr != 0x1)
+ return false;
+ return true;
+}
+
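+/*
+ * Usage sketch (rseq_mempool_create shown for illustration): a
+ * COW_INIT pool created before fork() can still be destroyed from
+ * the child, even though MADV_DONTFORK removed the per-cpu mapping
+ * from the child address space:
+ *
+ *	pool = rseq_mempool_create("example", item_len, attr);
+ *	if (fork() == 0) {
+ *		rseq_mempool_destroy(pool);	// canary page reads 0:
+ *		_exit(0);			// only headers unmapped.
+ *	}
+ */
+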
int rseq_mempool_destroy(struct rseq_mempool *pool)
{
struct rseq_mempool_range *range, *next_range;
+ bool mapping_accessible;
int ret = 0;
if (!pool)
return 0;
- check_free_list(pool);
- check_pool_poison(pool);
+
+ /*
+ * Validate that the pool mappings are accessible before doing
+ * free list/poison validation and unmapping ranges. This allows
+ * calling pool destroy in a child process after a fork for COW_INIT
+ * pools to free pool resources.
+ */
+ mapping_accessible = pool_mappings_accessible(pool);
+
+ check_free_list(pool, mapping_accessible);
+ check_pool_poison(pool, mapping_accessible);
+
/* Iteration safe against removal. */
for (range = pool->range_list; range && (next_range = range->next, 1); range = next_range) {
- if (rseq_mempool_range_destroy(pool, range))
+ if (rseq_mempool_range_destroy(pool, range, mapping_accessible))
goto end;
/* Update list head to keep list coherent in case of partial failure. */
pool->range_list = next_range;
if (_attr)
memcpy(&attr, _attr, sizeof(attr));
+ /*
+ * Validate that the pool populate policy requested is known.
+ */
+ switch (attr.populate_policy) {
+ case RSEQ_MEMPOOL_POPULATE_COW_INIT:
+ case RSEQ_MEMPOOL_POPULATE_COW_ZERO:
+ break;
+ default:
+ errno = EINVAL;
+ return NULL;
+ }
+
switch (attr.type) {
case MEMPOOL_TYPE_PERCPU:
if (attr.max_nr_cpus < 0) {
break;
case MEMPOOL_TYPE_GLOBAL:
/* Override populate policy for global type. */
- if (attr.populate_policy == RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE)
- attr.populate_policy = RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL;
+ if (attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
+ attr.populate_policy = RSEQ_MEMPOOL_POPULATE_COW_ZERO;
/* Use a 1-cpu pool for global mempool type. */
attr.max_nr_cpus = 1;
break;
attr.stride = RSEQ_MEMPOOL_STRIDE; /* Use default */
if (attr.robust_set && !attr.poison_set) {
attr.poison_set = true;
- attr.poison = DEFAULT_PRIVATE_POISON_VALUE;
+ if (attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT)
+ attr.poison = DEFAULT_COW_INIT_POISON_VALUE;
+ else
+ attr.poison = DEFAULT_COW_ZERO_POISON_VALUE;
}
if (item_len > attr.stride || attr.stride < (size_t) rseq_get_page_len() ||
!is_pow2(attr.stride)) {
item = __rseq_percpu_to_free_list_ptr(pool, _ptr);
/*
* Setting the next pointer will overwrite the first uintptr_t
- * poison for either CPU 0 (populate all) or init data (populate
- * none).
+ * poison for either CPU 0 (COW_ZERO, non-robust), or init data
+ * (COW_INIT, non-robust).
*/
item->next = head;
pool->free_list_head = item;
attr->init_set = true;
attr->init_func = init_func;
attr->init_priv = init_priv;
+ attr->populate_policy = RSEQ_MEMPOOL_POPULATE_COW_INIT;
return 0;
}
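+
+/*
+ * Effect on callers, as a sketch (my_init_func and priv are
+ * placeholder names): registering an init function selects the
+ * COW_INIT populate policy, because per-cpu pages must start as COW
+ * copies of the shared init values:
+ *
+ *	rseq_mempool_attr_set_init(attr, my_init_func, priv);
+ *	// attr->populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT
+ */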