// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Standard headers used below. The project-local header name is an
 * assumption: "rcu.h" is taken to declare struct side_rcu_gp_state,
 * struct side_rcu_cpu_gp_state, get_possible_cpus_array_len() and
 * rseq_prepare_unload().
 */
#include <poll.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "rcu.h"
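
/*
 * For reference, a sketch of the state this file manipulates. Field
 * names are taken from their uses below; the authoritative definitions
 * live in the header, and the exact layout there may differ (this is
 * an assumption, for illustration only):
 *
 *	struct side_rcu_percpu_count {
 *		uintptr_t begin;
 *		uintptr_t rseq_begin;
 *		uintptr_t end;
 *		uintptr_t rseq_end;
 *	};
 *
 *	struct side_rcu_cpu_gp_state {
 *		struct side_rcu_percpu_count count[2];	(one slot per period)
 *	};
 *
 *	struct side_rcu_gp_state {
 *		struct side_rcu_cpu_gp_state *percpu_state;
 *		int nr_cpus;
 *		unsigned int period;			(current period: 0 or 1)
 *		pthread_mutex_t gp_lock;
 *	};
 */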
/* active_readers is an input/output parameter. */
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	uintptr_t sum[2] = { 0, 0 };	/* begin - end */
	int i;

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0]) {
			sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
			sum[0] -= __atomic_load_n(&cpu_state->count[0].rseq_end, __ATOMIC_RELAXED);
		}
		if (active_readers[1]) {
			sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
			sum[1] -= __atomic_load_n(&cpu_state->count[1].rseq_end, __ATOMIC_RELAXED);
		}
	}

	/*
	 * This memory barrier (C) pairs with either of memory barriers
	 * (A) or (B) (one is sufficient).
	 *
	 * Read end counts before begin counts. Reading "end" before
	 * "begin" counts ensures we never see an "end" without having
	 * seen its associated "begin", because "begin" is always
	 * incremented before "end", as guaranteed by memory barriers
	 * (A) or (B).
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0]) {
			sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
			sum[0] += __atomic_load_n(&cpu_state->count[0].rseq_begin, __ATOMIC_RELAXED);
		}
		if (active_readers[1]) {
			sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
			sum[1] += __atomic_load_n(&cpu_state->count[1].rseq_begin, __ATOMIC_RELAXED);
		}
	}
	if (active_readers[0])
		active_readers[0] = sum[0];
	if (active_readers[1])
		active_readers[1] = sum[1];
}
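
/*
 * Illustrative read-side sketch (hypothetical, compiled out): a minimal
 * counterpart that would make the begin/end sums above reach zero at
 * quiescence. The real read-side lives in the public header and also
 * maintains the rseq_begin/rseq_end fast-path counters; the function
 * names and the use of sched_getcpu() here are assumptions for
 * illustration only.
 */
#if 0
static inline unsigned int example_read_begin(struct side_rcu_gp_state *gp_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);

	/* Account one active reader against the observed period. */
	(void) __atomic_add_fetch(&gp_state->percpu_state[sched_getcpu()].count[period].begin,
			1, __ATOMIC_RELAXED);
	/* Order the "begin" increment before the critical section. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	return period;
}

static inline void example_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	/* Order the critical section before the "end" increment. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	(void) __atomic_add_fetch(&gp_state->percpu_state[sched_getcpu()].count[period].end,
			1, __ATOMIC_RELAXED);
}
#endif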
/*
 * Wait for previous period to have no active readers.
 *
 * active_readers is an input/output parameter.
 */
void wait_for_prev_period_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	unsigned int prev_period = gp_state->period ^ 1;

	/*
	 * If a prior active readers scan already observed that no
	 * readers are present for the previous period, there is no need
	 * to scan again.
	 */
	if (!active_readers[prev_period])
		return;
	/*
	 * Wait for the sum of CPU begin/end counts to match for the
	 * previous period.
	 */
	for (;;) {
		check_active_readers(gp_state, active_readers);
		if (!active_readers[prev_period])
			break;
		/* Retry after 10ms. */
		poll(NULL, 0, 10);
	}
}
/*
 * The grace period completes when it observes that there are no active
 * readers within each of the periods.
 *
 * The active_readers state is initially true for each period, until the
 * grace period observes that no readers are present for each given
 * period, at which point the active_readers state becomes false.
 */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
{
	bool active_readers[2] = { true, true };

	/*
	 * This memory barrier (D) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders prior loads and stores before the "end"/"begin"
	 * reader state loads. In other words, it orders prior loads and
	 * stores before observation of active readers quiescence,
	 * effectively ensuring that read-side critical sections which
	 * exist after the grace period completes are ordered after
	 * loads and stores performed before the grace period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/*
	 * First scan through all cpus, for both periods. If no readers
	 * are accounted for, we have observed quiescence and can
	 * complete the grace period immediately.
	 */
	check_active_readers(gp_state, active_readers);
	if (!active_readers[0] && !active_readers[1])
		goto end;

	pthread_mutex_lock(&gp_state->gp_lock);

	wait_for_prev_period_readers(gp_state, active_readers);
	/*
	 * If the reader scan detected that there are no readers in the
	 * current period as well, we can complete the grace period
	 * immediately.
	 */
	if (!active_readers[gp_state->period])
		goto unlock;

	/* Flip period: 0 -> 1, 1 -> 0. */
	(void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);

	wait_for_prev_period_readers(gp_state, active_readers);
unlock:
	pthread_mutex_unlock(&gp_state->gp_lock);
end:
	/*
	 * This memory barrier (E) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders the "end"/"begin" reader state loads before
	 * following loads and stores. In other words, it orders
	 * observation of active readers quiescence before following
	 * loads and stores, effectively ensuring that read-side
	 * critical sections which existed prior to the grace period
	 * are ordered before loads and stores performed after the grace
	 * period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
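
/*
 * Usage sketch (hypothetical, compiled out): a typical updater
 * unpublishes a pointer, waits for a grace period, then reclaims it.
 * example_data and example_update() are assumptions for illustration.
 */
#if 0
static void *example_data;

static void example_update(struct side_rcu_gp_state *gp_state, void *new_data)
{
	void *old_data = __atomic_exchange_n(&example_data, new_data, __ATOMIC_SEQ_CST);

	/* Pre-existing readers may still hold old_data: wait them out. */
	side_rcu_wait_grace_period(gp_state);
	free(old_data);
}
#endif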
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
{
	memset(rcu_gp, 0, sizeof(*rcu_gp));
	rcu_gp->nr_cpus = get_possible_cpus_array_len();
	if (!rcu_gp->nr_cpus)
		abort();
	pthread_mutex_init(&rcu_gp->gp_lock, NULL);
	rcu_gp->percpu_state = calloc(rcu_gp->nr_cpus, sizeof(struct side_rcu_cpu_gp_state));
	if (!rcu_gp->percpu_state)
		abort();
}
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp)
{
	rseq_prepare_unload();
	pthread_mutex_destroy(&rcu_gp->gp_lock);
	free(rcu_gp->percpu_state);
}