// SPDX-License-Identifier: LGPL-2.1
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#include <rseq/percpu-op.h>

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};

struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

/* A simple percpu spinlock. */
void rseq_percpu_lock(struct percpu_lock *lock, int cpu)
{
	for (;;) {
		int ret;

		ret = percpu_cmpeqv_storev(&lock->c[cpu].v,
					   0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		if (rseq_unlikely(ret < 0)) {
			/* Unexpected error. */
			abort();
		}
		/* Retry if comparison fails. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
}

void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}

void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = arg;
	int i;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		int cpu = percpu_current_cpu();

		rseq_percpu_lock(&data->lock, cpu);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)data.reps * num_threads);
}

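/*
 * Illustrative sketch (not part of the original test): the "per-cpu
 * increment" alternative mentioned in the comment above, built only
 * from percpu_cmpeqv_storev() as used elsewhere in this file. The
 * helper name percpu_counter_inc() is hypothetical. Each retry
 * re-reads the current cpu's counter and attempts a compare-and-store
 * of the incremented value; no per-cpu lock is required.
 */
static inline void percpu_counter_inc(struct test_data_entry *c)
{
	for (;;) {
		int cpu = percpu_current_cpu();
		intptr_t old = RSEQ_READ_ONCE(c[cpu].count);
		int ret = percpu_cmpeqv_storev(&c[cpu].count, old, old + 1, cpu);

		if (rseq_likely(!ret))
			break;		/* Increment succeeded. */
		if (rseq_unlikely(ret < 0))
			abort();	/* Unexpected error. */
		/* Comparison failed (concurrent update); retry. */
	}
}
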
int percpu_list_push(struct percpu_list *list, struct percpu_list_node *node,
		     int cpu)
{
	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = percpu_cmpeqv_storev(targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		if (rseq_unlikely(ret < 0)) {
			/* Unexpected error. */
			abort();
		}
		/* Retry if comparison fails. */
	}
	return cpu;
}

/*
 * Unlike a traditional lock-less linked list, the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *percpu_list_pop(struct percpu_list *list,
					 int cpu)
{
	struct percpu_list_node *head;
	intptr_t *targetptr, expectnot, *load;
	off_t offset;
	int ret;

	targetptr = (intptr_t *)&list->c[cpu].head;
	expectnot = (intptr_t)NULL;
	offset = offsetof(struct percpu_list_node, next);
	load = (intptr_t *)&head;
	ret = percpu_cmpnev_storeoffp_load(targetptr, expectnot,
					   offset, load, cpu);
	if (rseq_unlikely(ret < 0)) {
		/* Unexpected error. */
		abort();
	}
	if (ret > 0)
		return NULL;	/* The list was empty. */
	return head;
}

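/*
 * Illustrative sketch (not part of the original test): the pop above is,
 * conceptually, the following non-atomic sequence, which
 * percpu_cmpnev_storeoffp_load() performs as a single per-cpu atomic
 * operation on the given cpu. The helper name below is hypothetical and
 * is only meant to make the semantics of the rseq primitive explicit.
 */
static inline struct percpu_list_node *
percpu_list_pop_nonatomic(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *head = list->c[cpu].head;

	if (head == NULL)			/* Compare against expectnot (NULL). */
		return NULL;
	list->c[cpu].head = head->next;		/* Store *(head + offsetof(next)). */
	return head;				/* Loaded into *load by the primitive. */
}
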
void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = percpu_list_pop(list, percpu_current_cpu());
		sched_yield();	/* encourage shuffling */
		if (node)
			percpu_list_push(list, node, percpu_current_cpu());
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/* Simultaneous modification to a per-cpu linked list from many threads. */
void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}

int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		return -1;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		return -1;
	}
	return 0;
}