// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _SIDE_RCU_H
#define _SIDE_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <side/trace.h>
#include <rseq/rseq.h>
#define SIDE_CACHE_LINE_SIZE	256
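
/*
 * Per-CPU reader counts. Counters updated through the rseq fast path
 * (rseq_begin/rseq_end) are kept in fields separate from the atomic
 * fallback counters (begin/end), so the two update schemes never mix
 * on the same memory location and grace period accounting can sum
 * both flavors for a given period. The cache-line alignment avoids
 * false sharing between CPUs.
 */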
struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t rseq_begin;
	uintptr_t end;
	uintptr_t rseq_end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};
struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	unsigned int period;
	pthread_mutex_t gp_lock;
};
extern unsigned int side_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));
//TODO: implement wait/wakeup for grace period using sys_futex
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;
	if (side_likely(side_rcu_rseq_membarrier_available)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (side_likely(!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_begin, 1, cpu))) {
			/*
			 * This compiler barrier (A) is paired with membarrier() at (C),
			 * (D), (E). It effectively upgrades this compiler barrier to a
			 * SEQ_CST fence with respect to the paired barriers.
			 *
			 * This barrier (A) ensures that the contents of the read-side
			 * critical section does not leak before the "begin" counter
			 * increment. It pairs with memory barriers (D) and (E).
			 *
			 * This barrier (A) also ensures that the "begin" increment is
			 * before the "end" increment. It pairs with memory barrier (C).
			 * It is redundant with barrier (B) for that purpose.
			 */
			rseq_barrier();
			return period;
		}
	}
	/* Fallback to atomic increment and SEQ_CST. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].begin, 1, __ATOMIC_SEQ_CST);
	return period;
}
static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;
	if (side_likely(side_rcu_rseq_membarrier_available)) {
		/*
		 * This compiler barrier (B) is paired with membarrier() at (C),
		 * (D), (E). It effectively upgrades this compiler barrier to a
		 * SEQ_CST fence with respect to the paired barriers.
		 *
		 * This barrier (B) ensures that the contents of the read-side
		 * critical section does not leak after the "end" counter
		 * increment. It pairs with memory barriers (D) and (E).
		 *
		 * This barrier (B) also ensures that the "begin" increment is
		 * before the "end" increment. It pairs with memory barrier (C).
		 * It is redundant with barrier (A) for that purpose.
		 */
		rseq_barrier();
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (side_likely(!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_end, 1, cpu)))
			return;
	}
	/* Fallback to atomic increment and SEQ_CST. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].end, 1, __ATOMIC_SEQ_CST);
}
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})
#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)
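
/*
 * Usage sketch (not part of this header's API): a read-side critical
 * section. The period returned by side_rcu_read_begin() must be passed
 * back to the matching side_rcu_read_end(). "struct example_config",
 * "example_gp" and "example_current" are hypothetical names used for
 * illustration only.
 */
#if 0
struct example_config { int value; };

static struct side_rcu_gp_state example_gp;	/* initialized with side_rcu_gp_init() */
static struct example_config *example_current;	/* RCU-protected pointer */

static int example_reader(void)
{
	struct example_config *cfg;
	unsigned int period;
	int v = 0;

	period = side_rcu_read_begin(&example_gp);
	/* Loads of the protected pointer go through side_rcu_dereference(). */
	cfg = side_rcu_dereference(example_current);
	if (cfg)
		v = cfg->value;
	side_rcu_read_end(&example_gp, period);
	return v;
}
#endif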
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
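
/*
 * Usage sketch (illustrative, hypothetical names, assuming a single
 * writer or external serialization): publishing a new version of an
 * RCU-protected pointer. The writer stores the new pointer with
 * release semantics, waits for a grace period so no reader can still
 * observe the old version, then reclaims it.
 */
#if 0
static void example_writer(struct example_config *new_cfg)
{
	struct example_config *old_cfg;

	old_cfg = example_current;
	side_rcu_assign_pointer(example_current, new_cfg);
	/* After this returns, no reader can still hold old_cfg. */
	side_rcu_wait_grace_period(&example_gp);
	free(old_cfg);	/* assuming heap allocation; needs <stdlib.h> */
}
#endif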
#endif /* _SIDE_RCU_H */