Update barrier comments for membarrier/compiler barrier
index bd6059cc3cacfa6b43cede802c6bd6154d0b1624..21dc1a3158ec29a7939d8efc3edeb42794e691cf 100644
--- a/src/rcu.c
+++ b/src/rcu.c
 #include <stdbool.h>
 #include <poll.h>
 #include <stdlib.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <linux/membarrier.h>
 
 #include "rcu.h"
 #include "smp.h"
 
+static int
+membarrier(int cmd, unsigned int flags, int cpu_id)
+{
+       return syscall(__NR_membarrier, cmd, flags, cpu_id);
+}
+
 /* active_readers is an input/output parameter. */
 static
 void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
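
The expedited private membarrier commands used in this patch need kernel support (Linux 4.14 or later) and, for MEMBARRIER_CMD_PRIVATE_EXPEDITED, a prior per-process registration (done in side_rcu_gp_init below). As a hedged sketch that is not part of the patch, availability could be probed with MEMBARRIER_CMD_QUERY, which returns a bitmask of the commands the running kernel supports:

static int
membarrier_private_expedited_available(void)
{
	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

	if (mask < 0)
		return 0;	/* membarrier() unavailable (e.g. ENOSYS). */
	return (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) &&
		(mask & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED);
}
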
@@ -24,10 +33,14 @@ void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_reade
        for (i = 0; i < gp_state->nr_cpus; i++) {
                struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];
 
-               if (active_readers[0])
+               if (active_readers[0]) {
                        sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
-               if (active_readers[1])
+                       sum[0] -= __atomic_load_n(&cpu_state->count[0].rseq_end, __ATOMIC_RELAXED);
+               }
+               if (active_readers[1]) {
                        sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
+                       sum[1] -= __atomic_load_n(&cpu_state->count[1].rseq_end, __ATOMIC_RELAXED);
+               }
        }
 
        /*
@@ -40,15 +53,20 @@ void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_reade
         * incremented before "end", as guaranteed by memory barriers
         * (A) or (B).
         */
-       __atomic_thread_fence(__ATOMIC_SEQ_CST);
+       if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
+               abort();
 
        for (i = 0; i < gp_state->nr_cpus; i++) {
                struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];
 
-               if (active_readers[0])
+               if (active_readers[0]) {
                        sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
-               if (active_readers[1])
+                       sum[0] += __atomic_load_n(&cpu_state->count[0].rseq_begin, __ATOMIC_RELAXED);
+               }
+               if (active_readers[1]) {
                        sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
+                       sum[1] += __atomic_load_n(&cpu_state->count[1].rseq_begin, __ATOMIC_RELAXED);
+               }
        }
        if (active_readers[0])
                active_readers[0] = sum[0];
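
Two things changed in this scan. First, each period's sum now also folds in rseq_begin/rseq_end counters next to begin/end, presumably so readers on an rseq fast path can bump a dedicated per-CPU counter without atomic instructions while fallback readers keep using the atomic counters. Second, the SEQ_CST fence between the end-sum and begin-sum loops became a membarrier() call, which executes a full barrier on every CPU currently running a thread of this process; that lets the matching reader side get away with compiler barriers only. A hedged sketch of such a reader fast path (the real one lives in rcu.h; names beyond struct side_rcu_cpu_gp_state are assumptions):

static inline void
reader_begin_sketch(struct side_rcu_cpu_gp_state *cpu_state, unsigned int period)
{
	/* Relaxed increment of this period's per-CPU begin counter. */
	(void) __atomic_add_fetch(&cpu_state->count[period].begin, 1, __ATOMIC_RELAXED);
	/* Compiler barrier: promoted to a full barrier, from this
	 * thread's point of view, by the grace period's membarrier(). */
	__asm__ __volatile__ ("" : : : "memory");
}

On exit, the reader would mirror this: compiler barrier first, then a relaxed increment of count[period].end.
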
@@ -109,7 +127,8 @@ void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
         * exist after the grace period completes are ordered after
         * loads and stores performed before the grace period.
         */
-       __atomic_thread_fence(__ATOMIC_SEQ_CST);
+       if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
+               abort();
 
        /*
         * First scan through all cpus, for both periods. If no readers
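
For callers, this is the guarantee that makes retire-then-free safe: anything unpublished before side_rcu_wait_grace_period() can be reclaimed after it returns. A hedged usage sketch, where struct node and shared_head are hypothetical and not part of libside:

struct node { int payload; };
static struct node *shared_head;

static void
retire_and_free_sketch(struct side_rcu_gp_state *gp_state)
{
	/* Unpublish, wait out pre-existing readers, then reclaim. */
	struct node *old = __atomic_exchange_n(&shared_head, NULL, __ATOMIC_RELAXED);

	side_rcu_wait_grace_period(gp_state);
	free(old);	/* No pre-existing reader can still hold 'old'. */
}
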
@@ -150,7 +169,8 @@ end:
         * are ordered before loads and stores performed after the grace
         * period.
         */
-       __atomic_thread_fence(__ATOMIC_SEQ_CST);
+       if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
+               abort();
 }
 
 void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
@@ -163,4 +183,13 @@ void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
        rcu_gp->percpu_state = calloc(rcu_gp->nr_cpus, sizeof(struct side_rcu_cpu_gp_state));
        if (!rcu_gp->percpu_state)
                abort();
+       if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
+               abort();
+}
+
+void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp)
+{
+       rseq_prepare_unload();
+       pthread_mutex_destroy(&rcu_gp->gp_lock);
+       free(rcu_gp->percpu_state);
 }
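
Putting the two entry points together, a hedged lifecycle sketch (single updater assumed; the read-side helpers referenced in the comment are assumptions about rcu.h):

int main(void)
{
	struct side_rcu_gp_state gp_state;

	side_rcu_gp_init(&gp_state);	/* Allocates per-CPU state and registers
					 * the process for private expedited
					 * membarrier; aborts on failure. */
	/* ... readers enter/exit read-side critical sections; the updater
	 * retires data and calls side_rcu_wait_grace_period(&gp_state)
	 * before freeing it ... */
	side_rcu_gp_exit(&gp_state);	/* rseq_prepare_unload(), destroy
					 * gp_lock, free per-CPU state. */
	return 0;
}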