#include <stdbool.h>
#include <poll.h>
#include <stdlib.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <linux/membarrier.h>
#include "rcu.h"
#include "smp.h"
+static int
+membarrier(int cmd, unsigned int flags, int cpu_id)
+{
+ return syscall(__NR_membarrier, cmd, flags, cpu_id);
+}
+
/* active_readers is an input/output parameter. */
static
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
* incremented before "end", as guaranteed by memory barriers
* (A) or (B).
*/
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
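+ /*
+  * The expedited membarrier issues a memory barrier in the context
+  * of every running thread belonging to this process, so readers
+  * only need compiler barriers on their fast path.
+  */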
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
+ abort();
for (i = 0; i < gp_state->nr_cpus; i++) {
struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];
* exist after the grace period completes are ordered after
* loads and stores performed before the grace period.
*/
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
+ abort();
/*
 * First scan through all cpus, for both periods. If no readers
* are ordered before loads and stores performed after the grace
* period.
*/
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
+ abort();
}
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
rcu_gp->percpu_state = calloc(rcu_gp->nr_cpus, sizeof(struct side_rcu_cpu_gp_state));
if (!rcu_gp->percpu_state)
abort();
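+ /*
+  * A process must register its intent to use private expedited
+  * membarrier commands before MEMBARRIER_CMD_PRIVATE_EXPEDITED
+  * can be invoked.
+  */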
+ if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
+ abort();
}
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp)
pthread_mutex_t gp_lock;
};
-//TODO: replace acquire/release by membarrier+compiler barrier (when available)
//TODO: implement wait/wakeup for grace period using sys_futex
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
* barrier (C). It is redundant with memory barrier (B) for that
* purpose.
*/
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
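+ /*
+  * A compiler barrier is sufficient here: the grace period issues
+  * an expedited membarrier, which provides the CPU-level memory
+  * barrier on behalf of readers.
+  */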
+ rseq_barrier();
return period;
}
* barrier (C). It is redundant with memory barrier (A) for that
* purpose.
*/
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rseq_barrier();
if (side_likely(rseq_offset > 0)) {
cpu = rseq_cpu_start();