read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
read_state->cpu = cpu;
if (side_likely(side_rcu_rseq_membarrier_available &&
- !rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
+ !rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
+ (intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
/*
 * This compiler barrier (A) is paired with membarrier() at (C),
 * (D), (E). It effectively upgrades this compiler barrier to a
 * SEQ_CST fence with respect to the paired barriers.
 */
rseq_barrier();
}
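/*
 * Illustrative sketch (not part of this patch): the updated rseq_addv()
 * signature used above takes explicit memory-ordering and per-CPU-mode
 * arguments. It returns 0 only when the add committed on `cpu`; if the
 * thread migrated or rseq is unavailable it returns non-zero and the
 * caller must fall back to a plain atomic, as the fallback paths in this
 * file do. The helper name is hypothetical and assumes librseq's
 * <rseq/rseq.h>.
 */
static inline void percpu_counter_inc(intptr_t *counter_for_cpu, int cpu)
{
	if (rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
			counter_for_cpu, 1, cpu)) {
		/* rseq aborted or unavailable: atomic fallback. */
		(void) __atomic_add_fetch(counter_for_cpu, 1, __ATOMIC_SEQ_CST);
	}
}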
if (side_likely(side_rcu_rseq_membarrier_available &&
- !rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
+ !rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
+ (intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
/*
 * This barrier (F) is paired with membarrier()
 * at (G). It orders increment of the begin/end
 * counters before load/store to the futex.
 */
rseq_barrier();
goto end;
}
/* Fallback to atomic increment and SEQ_CST. */
(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
/*
- * This barrier (F) is paired with SEQ_CST barrier or
- * membarrier() at (G). It orders increment of the begin/end
- * counters before load/store to the futex.
+ * This barrier (F) implied by SEQ_CST is paired with SEQ_CST
+ * barrier or membarrier() at (G). It orders increment of the
+ * begin/end counters before load/store to the futex.
*/
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
end:
side_rcu_wake_up_gp(gp_state);
}
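/*
 * Illustrative sketch (assumption, not part of this patch): the
 * grace-period side pairs with the rseq_barrier() compiler barriers
 * above at points (C), (D), (E) and (G) by issuing membarrier(2),
 * which upgrades them to SEQ_CST fences in every running reader
 * thread. MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED must be issued
 * once per process before the expedited command can be used. The
 * helper name is hypothetical; it assumes <linux/membarrier.h>,
 * <sys/syscall.h> and <unistd.h>.
 */
static int side_membarrier(int cmd)
{
	return syscall(__NR_membarrier, cmd, 0, 0);
}

/*
 * Once at init:       side_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED);
 * At (C)/(D)/(E)/(G): side_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED);
 */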
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})
-#define side_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE); \
+#define side_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE)
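/*
 * Illustrative usage sketch (not part of this patch): publishing a
 * structure with side_rcu_assign_pointer() and loading it with
 * side_rcu_dereference(). The type and variable names are made up;
 * a real reader would also be enclosed in the read-side critical
 * section shown earlier.
 */
struct config { int value; };

static struct config *current_config;	/* hypothetical shared pointer */

static void publish_config(struct config *newc)
{
	/* RELEASE store: initialization of *newc is visible before the pointer. */
	side_rcu_assign_pointer(current_config, newc);
}

static int read_config_value(void)
{
	/* CONSUME load: accesses through `c` are dependency-ordered after it. */
	struct config *c = side_rcu_dereference(current_config);

	return c ? c->value : -1;
}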
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
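/*
 * Illustrative updater sketch (assumption, single updater): swap the
 * pointer, wait for a grace period so no pre-existing reader still
 * holds the old version, then reclaim it. The gp-state variable and
 * the reuse of the hypothetical `current_config` from the sketch
 * above are made up; free() assumes <stdlib.h>.
 */
static struct side_rcu_gp_state config_gp;	/* side_rcu_gp_init(&config_gp) at startup */

static void update_config(struct config *newc)
{
	struct config *old = current_config;

	side_rcu_assign_pointer(current_config, newc);
	/* Block until all pre-existing read-side critical sections finish. */
	side_rcu_wait_grace_period(&config_gp);
	free(old);
}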