// SPDX-License-Identifier: LGPL-2.1-only
/*
 * Basic test of rseq-based per-cpu operations: a per-cpu spinlock and a
 * per-cpu linked list.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <sched.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rseq/rseq.h>

#include "tap.h"

/* TAP test points reported below: 2 pass() + 2 ok(). */
#define NR_TESTS	4

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))
/* Per-cpu lock word, padded so each cpu's entry gets its own cache lines. */
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};
struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};
struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};
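
/*
 * Layout note: the per-cpu entries above are aligned to 128 bytes so that
 * each cpu's data sits on its own cache lines, avoiding false sharing
 * between cpus (128 also covers adjacent-line prefetch pairs on common
 * x86 parts).
 */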
/* A simple percpu spinlock. Returns the cpu the lock was acquired on. */
static int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = rseq_cpu_start();
		/* Take the lock by atomically replacing 0 with 1 on this cpu. */
		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
					 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
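
/*
 * Note on the rseq primitive used above: rseq_cmpeqv_storev(v, expect,
 * newv, cpu) runs a critical section bound to @cpu which compares *v
 * against @expect and, on match, stores @newv. It returns 0 on success,
 * a positive value when the comparison fails, and a negative value when
 * the sequence is aborted (preemption, signal delivery, or migration),
 * in which case we retry on the then-current cpu.
 */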
static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
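
/*
 * Unlock needs no rseq critical section: only the lock holder writes the
 * word back to 0, so a plain store with release semantics is sufficient
 * to publish the critical section to the next acquirer.
 */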
static void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = (struct spinlock_test_data *) arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		/* Update this cpu's counter under its per-cpu lock. */
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment (a lock-free variant is sketched after this
 * function); however, this is reasonable for a test and the lock can
 * be extended to synchronize more complicated operations.
 */
static void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum, expected_sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;	/* Per-thread iterations (value assumed; any positive count works). */

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	/* Every thread performs data.reps locked increments. */
	expected_sum = (uint64_t)data.reps * num_threads;

	ok(sum == expected_sum, "spinlock - sum (%" PRIu64 " == %" PRIu64 ")",
	   sum, expected_sum);
}
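
/*
 * Aside (a sketch, not part of the original test): the "per-cpu
 * increment" alluded to above maps to a single rseq primitive, with no
 * lock word at all:
 *
 *	do {
 *		cpu = rseq_cpu_start();
 *		ret = rseq_addv(&data->c[cpu].count, 1, cpu);
 *	} while (rseq_unlikely(ret));	// retry on rseq abort
 */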
static void this_cpu_list_push(struct percpu_list *list,
			       struct percpu_list_node *node,
			       int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = rseq_cpu_start();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
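
/*
 * The RSEQ_READ_ONCE() load of the head above needs no further
 * synchronization: all mutations of list->c[cpu].head are committed
 * from rseq critical sections bound to @cpu, and the commit store only
 * succeeds if the head still equals @expect, so a stale snapshot simply
 * causes a retry.
 */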
/*
 * Unlike a traditional lock-less linked list, the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
static struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
						  int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = rseq_cpu_start();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;	/* List was empty. */
		/* Retry if rseq aborts. */
	}
}
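
/*
 * Why no ABA hazard: rseq_cmpnev_storeoffp_load() loads the current head
 * into *load and publishes head->next (found at @offset) as the new head
 * inside the critical section itself. A classical compare-and-swap pop
 * must read "next" outside any such section, where a concurrent pop/push
 * pair can reinstall the same head pointer with a different successor.
 */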
/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
static struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}
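
/*
 * This is safe in the test below only because it runs after every worker
 * thread has been joined, so the list is quiescent by then.
 */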
static void *test_percpu_list_thread(void *arg)
{
	struct percpu_list *list = (struct percpu_list *)arg;
	int i;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();	/* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
/* Simultaneous modification to a per-cpu linked list from many threads. */
static void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = (struct percpu_list_node *) malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	ok(sum == expected_sum, "percpu_list - sum (%" PRIu64 " == %" PRIu64 ")",
	   sum, expected_sum);
}
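
/*
 * Correctness argument for the check above: worker threads only move
 * nodes (pop, then push the same node), never create or destroy them,
 * so the multiset of node->data values, and hence their sum, is
 * invariant across the concurrent phase.
 */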
int main(void)
{
	plan_tests(NR_TESTS);

	if (!rseq_available()) {
		skip(NR_TESTS, "The rseq syscall is unavailable");
		goto end;
	}

	if (rseq_register_current_thread()) {
		fail("rseq_register_current_thread(...) failed(%d): %s\n",
		     errno, strerror(errno));
		goto end;
	} else {
		pass("Registered current thread with rseq");
	}

	test_percpu_spinlock();
	test_percpu_list();

	if (rseq_unregister_current_thread()) {
		fail("rseq_unregister_current_thread(...) failed(%d): %s\n",
		     errno, strerror(errno));
		goto end;
	} else {
		pass("Unregistered current thread with rseq");
	}
end:
	exit(exit_status());
}
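
/*
 * Build sketch (assumptions: file names are illustrative, and the
 * tap.h/tap.c TAP harness shipped with librseq's tests sits next to
 * this file):
 *
 *	cc -o basic_percpu_ops_test basic_percpu_ops_test.c tap.c \
 *		-lrseq -lpthread
 */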