// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#define _GNU_SOURCE	/* for sched_getcpu(), used by the read-side sketch below */
#include <poll.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
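/*
 * The reader/grace-period shared state is presumably declared in the
 * project's shared header; the definitions below are a minimal sketch
 * inferred from how the fields are used in this file, not the project's
 * authoritative layout. Each CPU keeps one begin/end counter pair per
 * period, and the grace period flips the active period under gp_lock.
 */
struct side_rcu_percpu_count {
	uintptr_t begin;	/* incremented when a reader enters */
	uintptr_t end;		/* incremented when a reader exits */
};

struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];	/* one counter pair per period */
};

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;	/* array of nr_cpus entries */
	int nr_cpus;
	unsigned int period;		/* active period: 0 or 1 */
	pthread_mutex_t gp_lock;	/* serializes grace periods */
};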
/* active_readers is an input/output parameter. */
static
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	uintptr_t sum[2] = { 0, 0 };	/* begin - end */
	int i;

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0])
			sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
		if (active_readers[1])
			sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
	}

	/*
	 * This memory barrier (C) pairs with either of memory barriers
	 * (A) or (B) (one is sufficient).
	 *
	 * Read end counts before begin counts. Reading "end" before
	 * "begin" counts ensures we never see an "end" without having
	 * seen its associated "begin", because "begin" is always
	 * incremented before "end", as guaranteed by memory barriers
	 * (A) or (B).
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0])
			sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
		if (active_readers[1])
			sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
	}
	/*
	 * A period is quiescent when its begin and end counts, summed
	 * across all CPUs, are equal (begin - end == 0).
	 */
	if (active_readers[0])
		active_readers[0] = sum[0];
	if (active_readers[1])
		active_readers[1] = sum[1];
}
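/*
 * The read-side counterpart (the code containing memory barriers (A)
 * and (B) referenced above) is not part of this file; it presumably
 * lives in the project's shared header. The functions below are a
 * minimal sketch of how the per-CPU begin/end counters could be used by
 * readers, inferred from the grace-period logic in this file. The
 * function names, the use of sched_getcpu(3) (glibc-specific), and the
 * lack of error and CPU-migration handling are illustrative
 * assumptions, not the project's confirmed API. Note that a reader may
 * migrate between begin and end, which is why the grace period sums
 * counters across CPUs rather than comparing them per-CPU.
 */
static inline
unsigned int side_rcu_read_begin_sketch(struct side_rcu_gp_state *gp_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[sched_getcpu()];

	(void) __atomic_add_fetch(&cpu_state->count[period].begin, 1, __ATOMIC_RELAXED);
	/*
	 * Memory barrier (A): orders the "begin" increment before the
	 * loads/stores of the critical section, pairing with the
	 * grace-period side fences.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	return period;	/* the caller hands this back to read_end */
}

static inline
void side_rcu_read_end_sketch(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[sched_getcpu()];

	/*
	 * Memory barrier (B): orders the loads/stores of the critical
	 * section before the "end" increment.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	(void) __atomic_add_fetch(&cpu_state->count[period].end, 1, __ATOMIC_RELAXED);
}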
/*
 * Wait for previous period to have no active readers.
 *
 * active_readers is an input/output parameter.
 */
static
void wait_for_prev_period_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	unsigned int prev_period = gp_state->period ^ 1;

	/*
	 * If a prior active readers scan already observed that no
	 * readers are present for the previous period, there is no need
	 * to scan again.
	 */
	if (!active_readers[prev_period])
		return;
	/*
	 * Wait for the sum of CPU begin/end counts to match for the
	 * previous period.
	 */
	for (;;) {
		check_active_readers(gp_state, active_readers);
		if (!active_readers[prev_period])
			break;
		/* Retry after 10ms. */
		poll(NULL, 0, 10);
	}
}
/*
 * The grace period completes when it observes that there are no active
 * readers within each of the periods.
 *
 * The active_readers state is initially true for each period, until the
 * grace period observes that no readers are present for each given
 * period, at which point the active_readers state becomes false.
 */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
{
	bool active_readers[2] = { true, true };

	/*
	 * This memory barrier (D) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders prior loads and stores before the "end"/"begin"
	 * reader state loads. In other words, it orders prior loads and
	 * stores before observation of active readers quiescence,
	 * effectively ensuring that read-side critical sections which
	 * exist after the grace period completes are ordered after
	 * loads and stores performed before the grace period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/*
	 * First scan through all cpus, for both periods. If no readers
	 * are accounted for, we have observed quiescence and can
	 * complete the grace period immediately.
	 */
	check_active_readers(gp_state, active_readers);
	if (!active_readers[0] && !active_readers[1])
		goto end;

	pthread_mutex_lock(&gp_state->gp_lock);

	wait_for_prev_period_readers(gp_state, active_readers);
	/*
	 * If the reader scan detected that there are no readers in the
	 * current period as well, we can complete the grace period
	 * immediately.
	 */
	if (!active_readers[gp_state->period])
		goto unlock;

	/* Flip period: 0 -> 1, 1 -> 0. */
	(void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);

	wait_for_prev_period_readers(gp_state, active_readers);
unlock:
	pthread_mutex_unlock(&gp_state->gp_lock);
end:
	/*
	 * This memory barrier (E) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders the "end"/"begin" reader state loads before
	 * following loads and stores. In other words, it orders
	 * observation of active readers quiescence before following
	 * loads and stores, effectively ensuring that read-side
	 * critical sections which existed prior to the grace period
	 * are ordered before loads and stores performed after the grace
	 * period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
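
/*
 * Example writer-side usage: a minimal sketch, not part of the original
 * file. "struct example_data", "example_gp" and "example_published" are
 * hypothetical names; example_gp's gp_lock and percpu_state are assumed
 * to be initialized by project setup code. The pattern is the classic
 * RCU publish/wait/reclaim sequence: swap in the new object, wait for a
 * grace period so that every reader which might still hold the old
 * object has finished, then free it.
 */
struct example_data {
	int value;
};

static struct side_rcu_gp_state example_gp;	/* assumed initialized elsewhere */
static struct example_data *example_published;

static void example_writer_replace(struct example_data *new_data)
{
	struct example_data *old_data;

	/* Atomically publish: readers see either the old or the new object. */
	old_data = __atomic_exchange_n(&example_published, new_data, __ATOMIC_SEQ_CST);
	/* Wait for all pre-existing read-side critical sections to complete. */
	side_rcu_wait_grace_period(&example_gp);
	free(old_data);	/* safe: no reader can still reference old_data */
}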