// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>

#include "rcu.h"	/* struct side_rcu_gp_state et al. (project-local header) */
#include "smp.h"	/* get_possible_cpus_array_len() (project-local header) */
/* active_readers is an input/output parameter. */
static
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	uintptr_t sum[2] = { 0, 0 };	/* begin - end */
	int i;

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0])
			sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
		if (active_readers[1])
			sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
	}

	/*
	 * This memory barrier (C) pairs with either of memory barriers
	 * (A) or (B) (one is sufficient).
	 *
	 * Read end counts before begin counts. Reading "end" before
	 * "begin" counts ensures we never see an "end" without having
	 * seen its associated "begin", because "begin" is always
	 * incremented before "end", as guaranteed by memory barriers
	 * (A) or (B).
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0])
			sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
		if (active_readers[1])
			sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
	}
	if (active_readers[0])
		active_readers[0] = sum[0];
	if (active_readers[1])
		active_readers[1] = sum[1];
}
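/*
 * For context: a minimal sketch of the read-side counterpart which the
 * scan above pairs with. This is an illustration, not part of this
 * file; the "example_" names are hypothetical. A reader increments the
 * per-CPU "begin" count of the current period on entry and the
 * matching "end" count on exit, so sum(begin) - sum(end) reaches zero
 * for a period once it is quiescent. A real implementation would
 * derive "cpu" from the running CPU (e.g. sched_getcpu()).
 */
static inline
unsigned int example_rcu_read_begin(struct side_rcu_gp_state *gp_state, int cpu)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);

	/*
	 * The seq_cst increment stands in for memory barrier (A),
	 * ordering the "begin" increment before the critical section.
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].begin,
			1, __ATOMIC_SEQ_CST);
	return period;
}

static inline
void example_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period, int cpu)
{
	/*
	 * The seq_cst increment stands in for memory barrier (B),
	 * ordering the critical section before the "end" increment.
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].end,
			1, __ATOMIC_SEQ_CST);
}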
/*
 * Wait for previous period to have no active readers.
 *
 * active_readers is an input/output parameter.
 */
static
void wait_for_prev_period_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	unsigned int prev_period = gp_state->period ^ 1;

	/*
	 * If a prior active readers scan already observed that no
	 * readers are present for the previous period, there is no need
	 * to scan again.
	 */
	if (!active_readers[prev_period])
		return;
	/*
	 * Wait for the sum of CPU begin/end counts to match for the
	 * previous period.
	 */
	for (;;) {
		check_active_readers(gp_state, active_readers);
		if (!active_readers[prev_period])
			break;
		/* Retry after 10ms. */
		poll(NULL, 0, 10);
	}
}
/*
 * The grace period completes when it observes that there are no active
 * readers within each of the periods.
 *
 * The active_readers state is initially true for each period, until the
 * grace period observes that no readers are present for each given
 * period, at which point the active_readers state becomes false.
 */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
{
	bool active_readers[2] = { true, true };

	/*
	 * This memory barrier (D) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders prior loads and stores before the "end"/"begin"
	 * reader state loads. In other words, it orders prior loads and
	 * stores before observation of active readers quiescence,
	 * effectively ensuring that read-side critical sections which
	 * exist after the grace period completes are ordered after
	 * loads and stores performed before the grace period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/*
	 * First scan through all cpus, for both periods. If no readers
	 * are accounted for, we have observed quiescence and can
	 * complete the grace period immediately.
	 */
	check_active_readers(gp_state, active_readers);
	if (!active_readers[0] && !active_readers[1])
		goto end;

	pthread_mutex_lock(&gp_state->gp_lock);

	wait_for_prev_period_readers(gp_state, active_readers);
	/*
	 * If the reader scan detected that there are no readers in the
	 * current period as well, we can complete the grace period
	 * immediately.
	 */
	if (!active_readers[gp_state->period])
		goto unlock;

	/* Flip period: 0 -> 1, 1 -> 0. */
	(void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);

	wait_for_prev_period_readers(gp_state, active_readers);
unlock:
	pthread_mutex_unlock(&gp_state->gp_lock);
end:
	/*
	 * This memory barrier (E) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders the "end"/"begin" reader state loads before
	 * following loads and stores. In other words, it orders
	 * observation of active readers quiescence before following
	 * loads and stores, effectively ensuring that read-side
	 * critical sections which existed prior to the grace period
	 * are ordered before loads and stores performed after the grace
	 * period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
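/*
 * Illustrative usage sketch, not part of this file: a writer publishes
 * a new version of a shared structure, waits for a grace period, then
 * reclaims the old version, which no pre-existing reader can still
 * reference. The "example_" names are hypothetical.
 */
struct example_data {
	int value;
};

static struct example_data *example_shared;	/* dereferenced by readers */

static void example_update(struct side_rcu_gp_state *gp_state,
		struct example_data *new_data)
{
	struct example_data *old_data;

	/* Atomically publish the new version. */
	old_data = __atomic_exchange_n(&example_shared, new_data, __ATOMIC_SEQ_CST);
	/* Wait for all read-side critical sections that may see old_data. */
	side_rcu_wait_grace_period(gp_state);
	/* Safe to reclaim: no reader holds a reference anymore. */
	free(old_data);
}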
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
{
	memset(rcu_gp, 0, sizeof(*rcu_gp));
	rcu_gp->nr_cpus = get_possible_cpus_array_len();
	if (!rcu_gp->nr_cpus)
		abort();
	pthread_mutex_init(&rcu_gp->gp_lock, NULL);
	rcu_gp->percpu_state = calloc(rcu_gp->nr_cpus, sizeof(struct side_rcu_cpu_gp_state));
	if (!rcu_gp->percpu_state)
		abort();
}

void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp)
{
	pthread_mutex_destroy(&rcu_gp->gp_lock);
	free(rcu_gp->percpu_state);
}
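/*
 * Illustrative lifecycle sketch, not part of this file: initialize the
 * per-CPU grace-period state, run readers and updaters, then tear it
 * down. Waiting for a final grace period before teardown is this
 * sketch's assumption, not a requirement documented here.
 */
static void example_lifecycle(void)
{
	struct side_rcu_gp_state gp_state;

	side_rcu_gp_init(&gp_state);
	/* ... spawn readers and updaters using &gp_state ... */
	side_rcu_wait_grace_period(&gp_state);
	side_rcu_gp_exit(&gp_state);
}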