// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2018-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rseq/rseq.h>

#include "tap.h"

#define NR_TESTS	4

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
static
int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_mm_cid_available();
}
static
bool rseq_use_cpu_index(void)
{
	return false;	/* Use mm_cid */
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
static
int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_current_cpu_raw() >= 0;
}
static
bool rseq_use_cpu_index(void)
{
	return true;	/* Use cpu_id as index. */
}
#endif
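/*
 * Added usage sketch (not part of the original test): how the three
 * getters above are meant to combine. It assumes the calling thread is
 * already registered via rseq_register_current_thread(); the helper
 * name print_cpu_index_mode is hypothetical.
 */
static inline void print_cpu_index_mode(void)
{
	if (!rseq_validate_cpu_id())
		return;	/* cpu id getter unavailable on this kernel. */
	/* Report whether per-cpu data is indexed by cpu_id or mm_cid. */
	printf("index %d (indexing by %s)\n", get_current_cpu_id(),
	       rseq_use_cpu_index() ? "cpu_id" : "mm_cid");
}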
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));	/* Aligned to avoid false sharing. */

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};
struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};
struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};
/* A simple percpu spinlock. Returns the cpu the lock was acquired on. */
static int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = get_current_cpu_id();
		ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						&lock->c[cpu].v, 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
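/*
 * Added usage sketch (not part of the original test): the intended
 * lock/unlock pairing around a per-cpu critical section, mirroring
 * test_percpu_spinlock_thread() below. The acquire barrier in
 * rseq_this_cpu_lock() and the store-release in rseq_percpu_unlock()
 * order the critical section against other threads contending for the
 * same shard's lock. The helper name locked_increment is hypothetical.
 */
static inline void locked_increment(struct spinlock_test_data *data)
{
	int cpu = rseq_this_cpu_lock(&data->lock);

	/* Critical section: protected by c[cpu]'s lock. */
	data->c[cpu].count++;
	rseq_percpu_unlock(&data->lock, cpu);
}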
static void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = (struct spinlock_test_data *) arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
static void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum, expected_sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	expected_sum = (uint64_t)data.reps * num_threads;

	ok(sum == expected_sum, "spinlock - sum (%" PRIu64 " == %" PRIu64 ")",
	   sum, expected_sum);
}
static void this_cpu_list_push(struct percpu_list *list,
			       struct percpu_list_node *node,
			       int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = get_current_cpu_id();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
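/*
 * Added usage sketch (not part of the original test): allocating a node
 * and publishing it through this_cpu_list_push(). Note that the push
 * helper links node->next itself, so callers only fill in the payload.
 * The helper name push_value is hypothetical.
 */
static inline int push_value(struct percpu_list *list, intptr_t value)
{
	struct percpu_list_node *node;

	node = (struct percpu_list_node *) malloc(sizeof(*node));
	if (!node)
		return -1;
	node->data = value;
	this_cpu_list_push(list, node, NULL);	/* Sets node->next. */
	return 0;
}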
/*
 * Unlike a traditional lock-less linked list, the availability of an
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
static struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
						  int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = get_current_cpu_id();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_load_cbeq_store_add_load_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
							       targetptr, expectnot,
							       offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;	/* List was empty. */
		/* Retry if rseq aborts. */
	}
}
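/*
 * Added usage sketch (not part of the original test): popping until the
 * current shard appears empty. Because the thread may migrate between
 * iterations, this does not guarantee fully draining any single per-cpu
 * list; it only illustrates the pop API. The helper name drain_some is
 * hypothetical.
 */
static inline uint64_t drain_some(struct percpu_list *list)
{
	struct percpu_list_node *node;
	uint64_t sum = 0;

	while ((node = this_cpu_list_pop(list, NULL))) {
		sum += (uint64_t)node->data;
		free(node);
	}
	return sum;
}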
/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
static struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}
static void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();	/* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
/* Simultaneous modification to a per-cpu linked list from many threads. */
static void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = (struct percpu_list_node *) malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	ok(sum == expected_sum, "percpu_list - sum (%" PRIu64 " == %" PRIu64 ")",
	   sum, expected_sum);
}
int main(void)
{
	plan_tests(NR_TESTS);

	if (!rseq_available(RSEQ_AVAILABLE_QUERY_KERNEL)) {
		skip(NR_TESTS, "The rseq syscall is unavailable");
		goto end;
	}

	if (rseq_register_current_thread()) {
		fail("rseq_register_current_thread(...) failed(%d): %s\n",
		     errno, strerror(errno));
		goto end;
	}
	pass("Registered current thread with rseq");

	if (!rseq_validate_cpu_id()) {
		skip(NR_TESTS - 1, "Error: cpu id getter unavailable");
		goto end;
	}
	test_percpu_spinlock();
	test_percpu_list();

	if (rseq_unregister_current_thread()) {
		fail("rseq_unregister_current_thread(...) failed(%d): %s\n",
		     errno, strerror(errno));
		goto end;
	}
	pass("Unregistered current thread with rseq");
end:
	exit(exit_status());
}