-// SPDX-License-Identifier: LGPL-2.1-only
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: 2018-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
-#include <stdbool.h>
#include <string.h>
#include <stddef.h>
-#include <errno.h>
-#include <rseq/percpu-op.h>
+#include <rseq/rseq.h>
+
+#include "tap.h"
+
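+/* TAP test points: register, spinlock sum, percpu_list sum, unregister. */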
+#define NR_TESTS 4
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
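+
+/*
+ * Per-CPU data is indexed either by the memory map concurrency id
+ * (mm_cid) or by the cpu_id reported by rseq, depending on the build
+ * option below.
+ */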
+#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
+# define RSEQ_PERCPU RSEQ_PERCPU_MM_CID
+static
+int get_current_cpu_id(void)
+{
+ return rseq_current_mm_cid();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+ return rseq_mm_cid_available();
+}
+static
+bool rseq_use_cpu_index(void)
+{
+ return false; /* Use mm_cid as index. */
+}
+#else
+# define RSEQ_PERCPU RSEQ_PERCPU_CPU_ID
+static
+int get_current_cpu_id(void)
+{
+ return rseq_cpu_start();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+ return rseq_current_cpu_raw() >= 0;
+}
+static
+bool rseq_use_cpu_index(void)
+{
+ return true; /* Use cpu_id as index. */
+}
+#endif
+
struct percpu_lock_entry {
intptr_t v;
} __attribute__((aligned(128)));
struct percpu_list_entry c[CPU_SETSIZE];
};
-static bool is_rseq_available;
-
-/* A simple percpu spinlock. */
-void rseq_percpu_lock(struct percpu_lock *lock, int cpu)
+/* A simple percpu spinlock. Returns the cpu the lock was acquired on. */
+static int rseq_this_cpu_lock(struct percpu_lock *lock)
{
+ int cpu;
+
for (;;) {
int ret;
- ret = percpu_cmpeqv_storev(&lock->c[cpu].v,
- 0, 1, cpu);
+ cpu = get_current_cpu_id();
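+ /*
+ * Attempt a 0 -> 1 transition of this cpu's lock word; the
+ * operation fails if the word is not 0 or if the rseq critical
+ * section aborts (e.g. on preemption or migration).
+ */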
+ ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ &lock->c[cpu].v, 0, 1, cpu);
if (rseq_likely(!ret))
break;
- if (rseq_unlikely(ret < 0)) {
- perror("do_on_cpu");
- abort();
- }
- /* Retry if comparison fails. */
+ /* Retry if comparison fails or rseq aborts. */
}
/*
* Acquire semantic when taking lock after control dependency.
* Matches rseq_smp_store_release().
*/
rseq_smp_acquire__after_ctrl_dep();
+ return cpu;
}
-void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
+static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
assert(lock->c[cpu].v == 1);
/*
rseq_smp_store_release(&lock->c[cpu].v, 0);
}
-void *test_percpu_spinlock_thread(void *arg)
+static void *test_percpu_spinlock_thread(void *arg)
{
- struct spinlock_test_data *data = arg;
- int i;
+ struct spinlock_test_data *data = (struct spinlock_test_data *) arg;
+ int i, cpu;
- if (is_rseq_available && rseq_register_current_thread()) {
+ if (rseq_register_current_thread()) {
fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
abort();
}
for (i = 0; i < data->reps; i++) {
- int cpu = percpu_current_cpu();
-
- rseq_percpu_lock(&data->lock, cpu);
+ cpu = rseq_this_cpu_lock(&data->lock);
data->c[cpu].count++;
rseq_percpu_unlock(&data->lock, cpu);
}
- if (is_rseq_available && rseq_unregister_current_thread()) {
+ if (rseq_unregister_current_thread()) {
fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
abort();
* per-cpu increment; however, this is reasonable for a test and the
* lock can be extended to synchronize more complicated operations.
*/
-void test_percpu_spinlock(void)
+static void test_percpu_spinlock(void)
{
const int num_threads = 200;
int i;
- uint64_t sum;
+ uint64_t sum, expected_sum;
pthread_t test_threads[num_threads];
struct spinlock_test_data data;
+ diag("spinlock");
+
memset(&data, 0, sizeof(data));
data.reps = 5000;
for (i = 0; i < CPU_SETSIZE; i++)
sum += data.c[i].count;
- assert(sum == (uint64_t)data.reps * num_threads);
+ expected_sum = (uint64_t)data.reps * num_threads;
+
+ ok(sum == expected_sum, "spinlock - sum (%" PRIu64 " == %" PRIu64 ")", sum, expected_sum);
}
-int percpu_list_push(struct percpu_list *list, struct percpu_list_node *node,
- int cpu)
+static void this_cpu_list_push(struct percpu_list *list,
+ struct percpu_list_node *node,
+ int *_cpu)
{
+ int cpu;
+
for (;;) {
intptr_t *targetptr, newval, expect;
int ret;
+ cpu = get_current_cpu_id();
/* Load list->c[cpu].head with single-copy atomicity. */
expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
newval = (intptr_t)node;
targetptr = (intptr_t *)&list->c[cpu].head;
node->next = (struct percpu_list_node *)expect;
- ret = percpu_cmpeqv_storev(targetptr, expect, newval, cpu);
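+ /* Publish the new node only if the head is still @expect. */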
+ ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, expect, newval, cpu);
if (rseq_likely(!ret))
break;
- if (rseq_unlikely(ret < 0)) {
- perror("do_on_cpu");
- abort();
- }
- /* Retry if comparison fails. */
+ /* Retry if comparison fails or rseq aborts. */
}
- return cpu;
+ if (_cpu)
+ *_cpu = cpu;
}
/*
* rseq primitive allows us to implement pop without concerns over
* ABA-type races.
*/
-struct percpu_list_node *percpu_list_pop(struct percpu_list *list,
- int cpu)
+static struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
+ int *_cpu)
{
- struct percpu_list_node *head;
- intptr_t *targetptr, expectnot, *load;
- off_t offset;
- int ret;
-
- targetptr = (intptr_t *)&list->c[cpu].head;
- expectnot = (intptr_t)NULL;
- offset = offsetof(struct percpu_list_node, next);
- load = (intptr_t *)&head;
- ret = percpu_cmpnev_storeoffp_load(targetptr, expectnot,
- offset, load, cpu);
- if (rseq_unlikely(ret < 0)) {
- perror("do_on_cpu");
- abort();
+ for (;;) {
+ struct percpu_list_node *head;
+ intptr_t *targetptr, expectnot, *load;
+ long offset;
+ int ret, cpu;
+
+ cpu = get_current_cpu_id();
+ targetptr = (intptr_t *)&list->c[cpu].head;
+ expectnot = (intptr_t)NULL;
+ offset = offsetof(struct percpu_list_node, next);
+ load = (intptr_t *)&head;
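+ /*
+ * Pop the head: on success the old head is stored into *load
+ * and head->next (read at @offset) becomes the new head. A
+ * return value > 0 means the list head equals @expectnot
+ * (empty list).
+ */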
+ ret = rseq_load_cbeq_store_add_load_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, expectnot,
+ offset, load, cpu);
+ if (rseq_likely(!ret)) {
+ if (_cpu)
+ *_cpu = cpu;
+ return head;
+ }
+ if (ret > 0)
+ return NULL;
+ /* Retry if rseq aborts. */
}
- if (ret > 0)
+}
+
+/*
+ * __percpu_list_pop is not safe against concurrent accesses. Should
+ * only be used on lists that are not concurrently modified.
+ */
+static struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
+{
+ struct percpu_list_node *node;
+
+ node = list->c[cpu].head;
+ if (!node)
return NULL;
- return head;
+ list->c[cpu].head = node->next;
+ return node;
}
-void *test_percpu_list_thread(void *arg)
+static void *test_percpu_list_thread(void *arg)
{
int i;
struct percpu_list *list = (struct percpu_list *)arg;
- if (is_rseq_available && rseq_register_current_thread()) {
+ if (rseq_register_current_thread()) {
fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
abort();
for (i = 0; i < 100000; i++) {
struct percpu_list_node *node;
- node = percpu_list_pop(list, percpu_current_cpu());
+ node = this_cpu_list_pop(list, NULL);
sched_yield(); /* encourage shuffling */
if (node)
- percpu_list_push(list, node, percpu_current_cpu());
+ this_cpu_list_push(list, node, NULL);
}
- if (is_rseq_available && rseq_unregister_current_thread()) {
+ if (rseq_unregister_current_thread()) {
fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
abort();
}
/* Simultaneous modification to a per-cpu linked list from many threads. */
-void test_percpu_list(void)
+static void test_percpu_list(void)
{
int i, j;
uint64_t sum = 0, expected_sum = 0;
pthread_t test_threads[200];
cpu_set_t allowed_cpus;
+ diag("percpu_list");
+
memset(&list, 0, sizeof(list));
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
for (i = 0; i < CPU_SETSIZE; i++) {
- if (!CPU_ISSET(i, &allowed_cpus))
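+ /* The affinity mask only constrains cpu_id indexing, not mm_cid. */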
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
for (j = 1; j <= 100; j++) {
struct percpu_list_node *node;
expected_sum += j;
- node = malloc(sizeof(*node));
+ node = (struct percpu_list_node *) malloc(sizeof(*node));
assert(node);
node->data = j;
node->next = list.c[i].head;
for (i = 0; i < CPU_SETSIZE; i++) {
struct percpu_list_node *node;
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
- while ((node = percpu_list_pop(&list, i))) {
+ while ((node = __percpu_list_pop(&list, i))) {
sum += node->data;
free(node);
}
* actor is interfering with our allowed affinity while this
* test is running).
*/
- assert(sum == expected_sum);
+ ok(sum == expected_sum, "percpu_list - sum (%" PRIu64 " == %" PRIu64 ")", sum, expected_sum);
}
int main(void)
{
- is_rseq_available = rseq_available();
- if (!is_rseq_available)
- fprintf(stderr, "Warning: rseq is not available\n");
- if (!cpu_op_available()) {
- fprintf(stderr, "Error: do_on_cpu is not available\n");
- goto error;
+ plan_tests(NR_TESTS);
+
+ if (!rseq_available(RSEQ_AVAILABLE_QUERY_KERNEL)) {
+ skip(NR_TESTS, "The rseq syscall is unavailable");
+ goto end;
}
- if (is_rseq_available && rseq_register_current_thread()) {
- fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
+ if (rseq_register_current_thread()) {
+ fail("rseq_register_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
- goto error;
+ goto end;
+ } else {
+ pass("Registered current thread with rseq");
+ }
+ if (!rseq_validate_cpu_id()) {
+ skip(NR_TESTS - 1, "Error: cpu id getter unavailable");
+ goto end;
}
- printf("spinlock\n");
test_percpu_spinlock();
- printf("percpu_list\n");
test_percpu_list();
- if (is_rseq_available && rseq_unregister_current_thread()) {
- fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
+
+ if (rseq_unregister_current_thread()) {
+ fail("rseq_unregister_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
- goto error;
+ goto end;
+ } else {
+ pass("Unregistered current thread with rseq");
}
- return 0;
-
-error:
- return -1;
+end:
+ exit(exit_status());
}
-