1 // SPDX-License-Identifier: MIT
2 // SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * rseq memory pool test.
21 #include <rseq/mempool.h>
22 #include "../src/rseq-utils.h"
24 #include "../src/list.h"
27 #if RSEQ_BITS_PER_LONG == 64
28 # define POISON_VALUE 0xABCDABCDABCDABCDULL
30 # define POISON_VALUE 0xABCDABCDUL
/*
 * Visible fields of struct test_data (the struct header, the value[]
 * array referenced later in this file, and the closing brace are on
 * lines missing from this chunk of the file):
 */
/* backref: percpu pointer back to the allocation that owns this item. */
35 struct test_data __rseq_percpu
*backref
;
/* node: linkage into the per-test list of allocated items. */
36 struct list_head node
;
/*
 * test_mempool_fill: exercise a robust per-CPU mempool configured with
 * the given populate policy, maximum number of ranges and stride.
 * Visible steps: build the pool attribute (robust, percpu, max ranges,
 * poison value, populate policy), create the pool, fill it with zeroed
 * allocations while linking each CPU-0 copy into a list, verify the
 * allocated byte accounting, scan all CPUs for content corruption, free
 * everything, then re-check single alloc/free and an initialized
 * allocation before destroying the pool.
 * NOTE(review): this view of the file is missing lines (braces, the
 * allocation loop header, and the "count"/"list" declarations among
 * others); comments below describe only what the visible fragments
 * establish.
 */
39 static void test_mempool_fill(enum rseq_mempool_populate_policy policy
,
40 unsigned long max_nr_ranges
, size_t stride
)
42 struct test_data __rseq_percpu
*ptr
;
43 struct test_data
*iter
, *tmp
;
44 struct rseq_mempool
*mempool
;
45 struct rseq_mempool_attr
*attr
;
48 int ret
, i
, size_order
;
/* init_value: template for the malloc_init test further down
 * (initializer contents are on lines missing from this view). */
49 struct test_data init_value
= {
/* Configure the pool attribute: robust accounting, per-CPU layout with
 * CPU_SETSIZE CPUs and the requested stride, bounded range count, a
 * known poison value and the requested populate policy. */
58 attr
= rseq_mempool_attr_create();
59 ok(attr
, "Create pool attribute");
60 ret
= rseq_mempool_attr_set_robust(attr
);
61 ok(ret
== 0, "Setting mempool robust attribute");
62 ret
= rseq_mempool_attr_set_percpu(attr
, stride
, CPU_SETSIZE
);
63 ok(ret
== 0, "Setting mempool percpu type");
64 ret
= rseq_mempool_attr_set_max_nr_ranges(attr
, max_nr_ranges
);
65 ok(ret
== 0, "Setting mempool max_nr_ranges=%lu", max_nr_ranges
);
66 ret
= rseq_mempool_attr_set_poison(attr
, POISON_VALUE
);
67 ok(ret
== 0, "Setting mempool poison");
68 ret
= rseq_mempool_attr_set_populate_policy(attr
, policy
);
69 ok(ret
== 0, "Setting mempool populate policy to %s",
70 policy
== RSEQ_MEMPOOL_POPULATE_COW_INIT
? "COW_INIT" : "COW_ZERO");
/* Create the pool, then release the attribute (the pool keeps its own
 * copy of the configuration). */
71 mempool
= rseq_mempool_create("test_data",
72 sizeof(struct test_data
), attr
);
73 ok(mempool
, "Create mempool of size %zu", stride
);
74 rseq_mempool_attr_destroy(attr
);
/* Fill loop: repeatedly zmalloc items until the pool is exhausted (the
 * loop header is on a line missing from this view).  Each item's CPU-0
 * copy records a backref to its percpu pointer and is linked into the
 * local list, alternating head/tail insertion to randomize order. */
77 struct test_data
*cpuptr
;
79 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_zmalloc(mempool
);
82 /* Link items in cpu 0. */
83 cpuptr
= rseq_percpu_ptr(ptr
, 0, stride
);
84 cpuptr
->backref
= ptr
;
85 /* Randomize items in list. */
87 list_add(&cpuptr
->node
, &list
);
89 list_add_tail(&cpuptr
->node
, &list
);
/* Accounting check: the number of allocated objects times the per-item
 * footprint (rounded up to a power of two) must equal the total pool
 * capacity across all ranges ("count" is declared/accumulated on lines
 * missing from this view). */
93 size_order
= rseq_get_count_order_ulong(sizeof(struct test_data
));
94 ok(count
* (1U << size_order
) == stride
* max_nr_ranges
,
95 "Allocated %" PRIu64
" objects in pool", count
);
/* Corruption scan: for every allocated item, check each CPU's copy;
 * zmalloc'd memory whose value[0] is nonzero indicates corruption
 * (the failure branch is on lines missing from this view). */
97 list_for_each_entry(iter
, &list
, node
) {
99 for (i
= 0; i
< CPU_SETSIZE
; i
++) {
100 struct test_data
*cpuptr
= rseq_percpu_ptr(ptr
, i
, stride
);
102 if (cpuptr
->value
[0] != 0)
107 ok(1, "Check for pool content corruption");
/* Free every item via its recorded backref (safe iteration: items are
 * unlinked/freed while walking). */
109 list_for_each_entry_safe(iter
, tmp
, &list
, node
) {
111 rseq_mempool_percpu_free(ptr
, stride
);
113 ok(1, "Free all objects");
/* Re-check a single zeroed allocation and free after pool exhaustion/
 * refill. */
115 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_zmalloc(mempool
);
118 ok(1, "Allocate one object");
120 rseq_mempool_percpu_free(ptr
, stride
);
121 ok(1, "Free one object");
/* Initialized allocation: copy init_value into every CPU's copy, then
 * verify the visible values (123/456 presumably come from the
 * init_value initializer missing from this view — confirm). */
123 ptr
= (struct test_data __rseq_percpu
*)
124 rseq_mempool_percpu_malloc_init(mempool
,
125 &init_value
, sizeof(struct test_data
));
128 ok(1, "Allocate one initialized object");
130 ok(ptr
->value
[0] == 123 && ptr
->value
[1] == 456, "Validate initial values");
132 rseq_mempool_percpu_free(ptr
, stride
);
133 ok(1, "Free one object");
/* A robust pool with no outstanding allocations must destroy cleanly. */
135 ret
= rseq_mempool_destroy(mempool
);
136 ok(ret
== 0, "Destroy mempool");
/*
 * test_robust_double_free: free the same percpu allocation twice.
 * Run in a child process by run_robust_test(), which expects the
 * robust pool to detect the double free and abort (SIGABRT).
 */
139 static void test_robust_double_free(struct rseq_mempool
*pool
,
140 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
142 struct test_data __rseq_percpu
*ptr
;
144 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
146 rseq_mempool_percpu_free(ptr
);
/* Second free of the same pointer: must trigger the robust-pool abort. */
147 rseq_mempool_percpu_free(ptr
);
/*
 * test_robust_corrupt_after_free: scribble on the pool's free-list
 * bookkeeping area after freeing an item, then destroy the pool.
 * Run in a child by run_robust_test(): the robust pool is expected to
 * detect the corruption on destroy and abort (SIGABRT).
 */
150 static void test_robust_corrupt_after_free(struct rseq_mempool
*pool
,
151 enum rseq_mempool_populate_policy policy
)
153 struct test_data __rseq_percpu
*ptr
;
154 struct test_data
*cpuptr
;
156 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
158 * Corrupt free list: For robust pools, the free list is located
159 * after the last cpu memory range for COW_ZERO, and after the init
160 * values memory range for COW_INIT.
/* Locate the range holding the free list, one (COW_ZERO) or two
 * (COW_INIT) strides past the last CPU's range. */
162 if (policy
== RSEQ_MEMPOOL_POPULATE_COW_ZERO
)
163 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
));
165 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
) + 1);
167 rseq_mempool_percpu_free(ptr
);
/* Overwrite the freed item's free-list slot with an arbitrary non-list
 * value (a function address) to corrupt the bookkeeping. */
168 cpuptr
->value
[0] = (uintptr_t) test_robust_corrupt_after_free
;
/* Destroy walks the free list and should abort on the corruption. */
170 rseq_mempool_destroy(pool
);
/*
 * test_robust_memory_leak: allocate an item and destroy the pool
 * without freeing it.  Run in a child by run_robust_test(): the robust
 * pool is expected to detect the leaked allocation on destroy and
 * abort (SIGABRT).
 */
173 static void test_robust_memory_leak(struct rseq_mempool
*pool
,
174 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
/* Deliberately leaked allocation. */
176 (void) rseq_mempool_percpu_malloc(pool
);
178 rseq_mempool_destroy(pool
);
/*
 * test_robust_free_list_corruption: make the freed item's free-list
 * link point at itself, then allocate twice.  Run in a child by
 * run_robust_test(): the second allocation should walk the corrupted
 * (self-referencing) free list, and the robust pool is expected to
 * abort (SIGABRT).
 */
181 static void test_robust_free_list_corruption(struct rseq_mempool
*pool
,
182 enum rseq_mempool_populate_policy policy
)
184 struct test_data __rseq_percpu
*ptr
;
185 struct test_data
*cpuptr
;
187 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
189 * Corrupt free list: For robust pools, the free list is located
190 * after the last cpu memory range for COW_ZERO, and after the init
191 * values memory range for COW_INIT.
/* Locate the range holding the free list, one (COW_ZERO) or two
 * (COW_INIT) strides past the last CPU's range. */
193 if (policy
== RSEQ_MEMPOOL_POPULATE_COW_ZERO
)
194 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
));
196 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
) + 1);
198 rseq_mempool_percpu_free(ptr
);
/* Point the free-list entry at itself: a self-loop the allocator must
 * detect. */
200 cpuptr
->value
[0] = (uintptr_t) cpuptr
;
/* First malloc returns the freed item; the second follows the
 * corrupted link and should trigger the abort. */
202 (void) rseq_mempool_percpu_malloc(pool
);
203 (void) rseq_mempool_percpu_malloc(pool
);
/*
 * test_robust_poison_corruption_malloc: modify freed (poisoned) memory,
 * then allocate again.  Run in a child by run_robust_test(): the
 * allocator's poison check on re-allocation is expected to detect the
 * modified byte and abort (SIGABRT).
 */
206 static void test_robust_poison_corruption_malloc(struct rseq_mempool
*pool
,
207 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
209 struct test_data __rseq_percpu
*ptr
;
210 struct test_data
*cpuptr
;
212 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
/* Keep a raw pointer to CPU 0's copy so we can write after free. */
213 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, 0);
215 rseq_mempool_percpu_free(ptr
);
/* Use-after-free write: clobbers the poison pattern. */
217 cpuptr
->value
[0] = 1;
/* Re-allocation validates the poison and should abort. */
219 (void) rseq_mempool_percpu_malloc(pool
);
/*
 * test_robust_poison_corruption_destroy: modify freed (poisoned)
 * memory, then destroy the pool.  Run in a child by run_robust_test():
 * the poison check on destroy is expected to detect the modified byte
 * and abort (SIGABRT).
 */
222 static void test_robust_poison_corruption_destroy(struct rseq_mempool
*pool
,
223 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
225 struct test_data __rseq_percpu
*ptr
;
226 struct test_data
*cpuptr
;
228 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
/* Keep a raw pointer to CPU 0's copy so we can write after free. */
229 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, 0);
231 rseq_mempool_percpu_free(ptr
);
/* Use-after-free write: clobbers the poison pattern. */
233 cpuptr
->value
[0] = 1;
/* Destroy validates poisoned free items and should abort. */
235 rseq_mempool_destroy(pool
);
/*
 * make_test_pool: build the single-CPU robust pool used by the robust
 * abort tests, configured with the default stride and the requested
 * populate policy.  Returns the new pool (the error-handling branches,
 * the "ret" declaration and the return statement are on lines missing
 * from this view).
 */
238 static struct rseq_mempool
*make_test_pool(enum rseq_mempool_populate_policy policy
)
240 struct rseq_mempool_attr
*attr
;
241 struct rseq_mempool
*pool
;
246 attr
= rseq_mempool_attr_create();
252 ret
= rseq_mempool_attr_set_robust(attr
);
/* Single-CPU percpu pool with the default stride. */
258 ret
= rseq_mempool_attr_set_percpu(attr
, RSEQ_MEMPOOL_STRIDE
, 1);
264 ret
= rseq_mempool_attr_set_populate_policy(attr
, policy
);
270 pool
= rseq_mempool_create("mempool-robust",
271 sizeof(struct test_data
), attr
);
/* The pool keeps its own copy of the configuration. */
273 rseq_mempool_attr_destroy(attr
);
/*
 * run_robust_test: run one robust-pool abuse test in a forked child and
 * report whether the child died from SIGABRT (the expected outcome of
 * every robust-pool detection).  The fork and the child's invocation of
 * "test" are on lines missing from this view; the parent side visible
 * here waits for the child and inspects its termination status.
 */
279 static int run_robust_test(void (*test
)(struct rseq_mempool
*, enum rseq_mempool_populate_policy
),
280 enum rseq_mempool_populate_policy policy
)
284 struct rseq_mempool
*pool
;
293 * Intentional leak of test pool because some tests might want
294 * to do an explicit destroy on it.
296 pool
= make_test_pool(policy
);
302 waitpid(cpid
, &status
, 0);
/* Success means the child was killed by SIGABRT (robust detection
 * fired); the return statements are on lines missing from this view. */
305 if (WIFSIGNALED(status
) &&
306 (SIGABRT
== WTERMSIG(status
)))
/*
 * run_robust_tests: run every robust-pool abuse test under the given
 * populate policy, reporting each via a TAP ok() line.  Each test is
 * expected to abort in a forked child (see run_robust_test()).
 */
312 static void run_robust_tests(enum rseq_mempool_populate_policy policy
)
315 ok(run_robust_test(test_robust_double_free
, policy
),
316 "robust-double-free");
318 ok(run_robust_test(test_robust_memory_leak
, policy
),
319 "robust-memory-leak");
321 ok(run_robust_test(test_robust_poison_corruption_malloc
, policy
),
322 "robust-poison-corruption-malloc");
324 ok(run_robust_test(test_robust_poison_corruption_destroy
, policy
),
325 "robust-poison-corruption-destroy");
327 ok(run_robust_test(test_robust_corrupt_after_free
, policy
),
328 "robust-corrupt-after-free");
330 ok(run_robust_test(test_robust_free_list_corruption
, policy
),
331 "robust-free-list-corruption");
/*
 * fork_child: child-side body for run_fork_destroy_pool_test(): simply
 * destroy the pool inherited across fork().  Destroying must succeed
 * (no abort) in the child.
 */
334 static void fork_child(struct rseq_mempool
*pool
,
335 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
337 rseq_mempool_destroy(pool
);
341 * Test that destroying a mempool works in child after fork.
/*
 * run_fork_destroy_pool_test: fork, have the child run "test" (which
 * destroys the pool), and report success when the child exits without
 * being killed by a signal.  The fork, the child's invocation of
 * "test", and the return statements are on lines missing from this
 * view; the parent's wait and signal check are visible below.
 */
343 static int run_fork_destroy_pool_test(void (*test
)(struct rseq_mempool
*, enum rseq_mempool_populate_policy
),
344 enum rseq_mempool_populate_policy policy
)
348 struct rseq_mempool
*pool
;
350 pool
= make_test_pool(policy
);
363 waitpid(cpid
, &status
, 0);
/* A signaled child means destroy-after-fork failed. */
366 if (WIFSIGNALED(status
))
/*
 * Test driver body (the enclosing function header — presumably main()
 * — the "len" declaration and the TAP plan/exit lines are on lines
 * missing from this view).  Sweeps test_mempool_fill() over a range of
 * nr_ranges and strides for both populate policies, then runs the
 * robust-pool abort tests and the fork/destroy tests.
 */
375 unsigned long nr_ranges
;
/* Small strides (page size up to 64kB) combined with 1..16 ranges. */
379 for (nr_ranges
= 1; nr_ranges
< 32; nr_ranges
<<= 1) {
380 /* From page size to 64kB */
381 for (len
= rseq_get_page_len(); len
< 65536; len
<<= 1) {
382 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_ZERO
, nr_ranges
, len
);
383 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_INIT
, nr_ranges
, len
);
/* Large strides, single range only. */
387 len
= rseq_get_page_len();
390 /* From min(page size, 64kB) to 4MB */
391 for (; len
< 4096 * 1024; len
<<= 1) {
392 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_ZERO
, 1, len
);
393 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_INIT
, 1, len
);
/* Robust-pool abort detection under both populate policies. */
396 run_robust_tests(RSEQ_MEMPOOL_POPULATE_COW_ZERO
);
397 run_robust_tests(RSEQ_MEMPOOL_POPULATE_COW_INIT
);
/* Destroying a pool in a forked child must work for both policies. */
398 ok(run_fork_destroy_pool_test(fork_child
, RSEQ_MEMPOOL_POPULATE_COW_ZERO
),
399 "fork destroy pool test populate COW_ZERO");
400 ok(run_fork_destroy_pool_test(fork_child
, RSEQ_MEMPOOL_POPULATE_COW_INIT
),
401 "fork destroy pool test populate COW_INIT");