#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <syscall.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/list.h>

/*
 * Identify a shared load. An smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)	ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})

/*
 * Identify a shared store. An smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v) \
	({ \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
		(v); \
	})

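/*
 * Illustrative sketch (not part of this header): a writer publishes a value
 * with STORE_SHARED() and a reader picks it up with LOAD_SHARED(). The
 * smp_wmc()/smp_rmc() pair keeps the reader from using a stale cached copy
 * on architectures without coherent caches.
 *
 *	static int shared_flag;
 *
 *	void writer(void)
 *	{
 *		STORE_SHARED(shared_flag, 1);
 *	}
 *
 *	int reader(void)
 *	{
 *		return LOAD_SHARED(shared_flag);
 *	}
 */
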
/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer into a local
 * variable inside an RCU read-side critical section. The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
 * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
 * data read before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile access.
 * This acts as a global side-effect operation, which forbids reordering of
 * dependent memory operations. Note that such concern about dependency-breaking
 * optimizations will eventually be taken care of by the "memory_order_consume"
 * addition to the forthcoming C++ standard.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 */

#define _rcu_dereference(p)	({ \
		typeof(p) _________p1 = LOAD_SHARED(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})

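/*
 * Illustrative sketch (struct and variable names are hypothetical): copy a
 * shared pointer once inside the read-side critical section, then use the
 * stable local copy until the section ends. shared_foo is assumed to be
 * published with rcu_assign_pointer().
 *
 *	struct foo *shared_foo;
 *
 *	void reader_thread(void)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = _rcu_dereference(shared_foo);
 *		if (p)
 *			use(p);
 *		rcu_read_unlock();
 *	}
 */
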
#define futex(...)	syscall(__NR_futex, __VA_ARGS__)
#define FUTEX_WAIT	0
#define FUTEX_WAKE	1

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGURCU= when compiling the library.
 */
#ifndef SIGURCU
#define SIGURCU SIGUSR1
#endif
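
/*
 * For example (hypothetical build line), to use SIGUSR2 instead:
 *
 *	gcc -DSIGURCU=SIGUSR2 -c urcu.c
 */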

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_reader.ctr value to memory (there is no barrier in the reader
 * per-se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates without URCU_MB are much slower. Account for this in
 * the delay.
 */
#ifdef URCU_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

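/*
 * Illustrative sketch: a test program built with -DDEBUG_YIELD can inject
 * random delays in readers only by setting, before starting the threads:
 *
 *	yield_active |= YIELD_READ;
 *	debug_yield_init();
 */
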
#ifdef URCU_MB
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that the position of RCU_GP_CTR_BIT must be a multiple of
 * 8 so we can use a full 8-bit, 16-bit or 32-bit bitmask for the lower order
 * bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

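/*
 * For example, on a 64-bit architecture (sizeof(long) == 8):
 *
 *	RCU_GP_CTR_BIT       == 1UL << 32
 *	RCU_GP_CTR_NEST_MASK == 0xffffffff
 *
 * so the low 32 bits of a reader's counter hold its read-side nesting depth
 * and the bit above them flips once per grace-period phase.
 */
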
/*
 * Global quiescent period counter with low-order bits unused.
 * Using a long rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

struct urcu_reader {
	long ctr;
	struct list_head head;
	pthread_t tid;
	char need_mb;
};

extern struct urcu_reader __thread urcu_reader;

extern int gp_futex;

/*
 * Wake up the waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex(&gp_futex, FUTEX_WAKE, 1,
		      NULL, NULL, 0);
	}
}

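/*
 * Returns non-zero iff the reader owning *value is in a read-side critical
 * section (non-zero nesting count) whose snapshot of the grace period phase
 * bit differs from the current urcu_gp_ctr, i.e. the grace period must still
 * wait for that reader.
 */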
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

static inline void _rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_reader.ctr;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See force_mb_all_threads().
		 */
		reader_barrier();
	} else {
		_STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
	}
}

static inline void _rcu_read_unlock(void)
{
	long tmp;

	tmp = urcu_reader.ctr;
	/*
	 * Finish using rcu before decrementing the reader count.
	 * See force_mb_all_threads().
	 */
	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		reader_barrier();
		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
		/* write urcu_reader.ctr before read futex */
		reader_barrier();
		wake_up_gp();
	} else {
		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
	}
}
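
/*
 * Illustrative note: read-side critical sections nest. Only the outermost
 * _rcu_read_lock() snapshots urcu_gp_ctr, and only the outermost
 * _rcu_read_unlock() issues the barriers and calls wake_up_gp(); nested
 * calls merely adjust the nesting count.
 *
 *	_rcu_read_lock();
 *	_rcu_read_lock();	(nested: increments the count only)
 *	...
 *	_rcu_read_unlock();	(nested: decrements the count only)
 *	_rcu_read_unlock();	(outermost: barrier + wake_up_gp())
 */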

/**
 * _rcu_assign_pointer - assign (publish) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 */

#define _rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})

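/*
 * Illustrative sketch (names are hypothetical): fully initialize the
 * structure before publishing it, so readers never observe a partially
 * initialized object.
 *
 *	struct foo *f = malloc(sizeof(*f));
 *	f->a = 42;
 *	f->b = 77;
 *	_rcu_assign_pointer(shared_foo, f);
 */
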
/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but only performs the
 * assignment if the pointer currently equals "old". On success, returns the
 * previous pointer to the data structure, which can be safely freed after
 * waiting for a quiescent state using synchronize_rcu(). On failure
 * (unexpected current value), returns that current value, which must not be
 * freed!
 */

#define _rcu_cmpxchg_pointer(p, old, _new) \
	({ \
		if (!__builtin_constant_p(_new) || \
		    ((_new) != NULL)) \
			wmb(); \
		uatomic_cmpxchg(p, old, _new); \
	})

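/*
 * Illustrative sketch (names are hypothetical): lock-free replacement of a
 * published pointer, retrying if a concurrent updater races with us. Success
 * is detected by comparing the returned value with the expected one.
 *
 *	struct foo *expected, *observed;
 *
 *	do {
 *		expected = LOAD_SHARED(shared_foo);
 *		observed = _rcu_cmpxchg_pointer(&shared_foo, expected, f);
 *	} while (observed != expected);
 *	synchronize_rcu();
 *	free(observed);
 */
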
/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 */

#define _rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		uatomic_xchg(p, v); \
	})

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define _rcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = _rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

#endif /* _URCU_STATIC_H */