[libside.git] / src / rcu.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <sched.h>
#include <string.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

#include "rcu.h"
#include "smp.h"

/*
 * If both rseq (with glibc support) and membarrier system calls are
 * available, use them to replace barriers and atomics on the fast-path.
 */
unsigned int side_rcu_rseq_membarrier_available;

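/*
 * glibc does not provide a wrapper for the membarrier(2) system call,
 * so invoke it directly through syscall(2).
 */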
static int
membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

/* active_readers is an input/output parameter. */
static
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	uintptr_t sum[2] = { 0, 0 };	/* begin - end */
	int i;

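	/*
	 * For each period, accumulate sum = (begin counts) - (end counts)
	 * across all CPUs, covering both the atomic and the rseq per-CPU
	 * counters. A sum of zero for a period means no reader remains
	 * active in it. End counts are read before begin counts (see the
	 * comment on barrier (C) below).
	 */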
	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0]) {
			sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
			sum[0] -= __atomic_load_n(&cpu_state->count[0].rseq_end, __ATOMIC_RELAXED);
		}
		if (active_readers[1]) {
			sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
			sum[1] -= __atomic_load_n(&cpu_state->count[1].rseq_end, __ATOMIC_RELAXED);
		}
	}

	/*
	 * This memory barrier (C) pairs with either of memory barriers
	 * (A) or (B) (one is sufficient).
	 *
	 * Read end counts before begin counts. Reading "end" before
	 * "begin" counts ensures we never see an "end" without having
	 * seen its associated "begin", because "begin" is always
	 * incremented before "end", as guaranteed by memory barriers
	 * (A) or (B).
	 */
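	/*
	 * MEMBARRIER_CMD_PRIVATE_EXPEDITED issues memory barriers on all
	 * running threads of the calling process, which is what lets the
	 * rseq read-side fast path omit its own barriers.
	 */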
	if (side_rcu_rseq_membarrier_available) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0)) {
			perror("membarrier");
			abort();
		}
	} else {
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0]) {
			sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
			sum[0] += __atomic_load_n(&cpu_state->count[0].rseq_begin, __ATOMIC_RELAXED);
		}
		if (active_readers[1]) {
			sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
			sum[1] += __atomic_load_n(&cpu_state->count[1].rseq_begin, __ATOMIC_RELAXED);
		}
	}
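	/*
	 * A period is reported as still having active readers when its
	 * begin/end sums do not cancel out (non-zero sum).
	 */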
	if (active_readers[0])
		active_readers[0] = sum[0];
	if (active_readers[1])
		active_readers[1] = sum[1];
}

/*
 * Wait for previous period to have no active readers.
 *
 * active_readers is an input/output parameter.
 */
static
void wait_for_prev_period_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	unsigned int prev_period = gp_state->period ^ 1;

	/*
	 * If a prior active readers scan already observed that no
	 * readers are present for the previous period, there is no need
	 * to scan again.
	 */
	if (!active_readers[prev_period])
		return;
	/*
	 * Wait for the sum of CPU begin/end counts to match for the
	 * previous period.
	 */
	for (;;) {
		check_active_readers(gp_state, active_readers);
		if (!active_readers[prev_period])
			break;
		/* Retry after 10ms. */
		poll(NULL, 0, 10);
	}
}

/*
 * The grace period completes when it observes that there are no active
 * readers within each of the periods.
 *
 * The active_readers state is initially true for each period, until the
 * grace period observes that no readers are present for each given
 * period, at which point the active_readers state becomes false.
 */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
{
	bool active_readers[2] = { true, true };

	/*
	 * This memory barrier (D) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders prior loads and stores before the "end"/"begin"
	 * reader state loads. In other words, it orders prior loads and
	 * stores before observation of active readers quiescence,
	 * effectively ensuring that read-side critical sections which
	 * exist after the grace period completes are ordered after
	 * loads and stores performed before the grace period.
	 */
	if (side_rcu_rseq_membarrier_available) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0)) {
			perror("membarrier");
			abort();
		}
	} else {
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	/*
	 * First scan through all cpus, for both periods. If no readers
	 * are accounted for, we have observed quiescence and can
	 * complete the grace period immediately.
	 */
	check_active_readers(gp_state, active_readers);
	if (!active_readers[0] && !active_readers[1])
		goto end;

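	/*
	 * Serialize grace periods: the period flip and the waiting below
	 * are performed under gp_lock.
	 */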
	pthread_mutex_lock(&gp_state->gp_lock);

	wait_for_prev_period_readers(gp_state, active_readers);
	/*
	 * If the reader scan detected that there are no readers in the
	 * current period as well, we can complete the grace period
	 * immediately.
	 */
	if (!active_readers[gp_state->period])
		goto unlock;

	/* Flip period: 0 -> 1, 1 -> 0. */
	(void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);

	wait_for_prev_period_readers(gp_state, active_readers);
unlock:
	pthread_mutex_unlock(&gp_state->gp_lock);
end:
	/*
	 * This memory barrier (E) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders the "end"/"begin" reader state loads before
	 * following loads and stores. In other words, it orders
	 * observation of active readers quiescence before following
	 * loads and stores, effectively ensuring that read-side
	 * critical sections which existed prior to the grace period
	 * are ordered before loads and stores performed after the grace
	 * period.
	 */
	if (side_rcu_rseq_membarrier_available) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0)) {
			perror("membarrier");
			abort();
		}
	} else {
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
	}
}

void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
{
	bool has_membarrier = false, has_rseq = false;

	memset(rcu_gp, 0, sizeof(*rcu_gp));
	rcu_gp->nr_cpus = get_possible_cpus_array_len();
	if (!rcu_gp->nr_cpus)
		abort();
	pthread_mutex_init(&rcu_gp->gp_lock, NULL);
	rcu_gp->percpu_state = calloc(rcu_gp->nr_cpus, sizeof(struct side_rcu_cpu_gp_state));
	if (!rcu_gp->percpu_state)
		abort();
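	/*
	 * MEMBARRIER_CMD_PRIVATE_EXPEDITED requires a prior
	 * MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED registration. Only
	 * enable the fast path when both this registration and glibc
	 * rseq support are available.
	 */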
	if (!membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		has_membarrier = true;
	if (rseq_available(RSEQ_AVAILABLE_QUERY_LIBC))
		has_rseq = true;
	if (has_membarrier && has_rseq)
		side_rcu_rseq_membarrier_available = 1;
}

void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp)
{
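	/*
	 * rseq_prepare_unload() is provided by librseq; it is expected
	 * to be called before code containing rseq critical sections
	 * may be unloaded.
	 */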
	rseq_prepare_unload();
	pthread_mutex_destroy(&rcu_gp->gp_lock);
	free(rcu_gp->percpu_state);
}