/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;
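
/*
 * For reference, a sketch of the counter layout assumed from urcu.h (the
 * exact definitions live in that header; treat these as illustrative): the
 * low half of the long is a nesting count incremented in RCU_GP_COUNT steps,
 * and RCU_GP_CTR_BIT is the grace-period parity bit just above it.
 */
#if 0	/* illustrative only, not compiled */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture long size. */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
#endif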
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;
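
/*
 * Sketch of the read-side fast path that pairs with urcu_active_readers
 * (illustrative of the urcu.h inlines, not a verbatim copy): an outermost
 * rcu_read_lock() snapshots urcu_gp_ctr (current parity plus a count of 1),
 * while nested locks only bump the nesting count.
 */
#if 0	/* illustrative only, not compiled */
static inline void rcu_read_lock(void)
{
	long tmp = urcu_active_readers;

	if (!(tmp & RCU_GP_CTR_NEST_MASK))	/* outermost lock */
		STORE_SHARED(urcu_active_readers, LOAD_SHARED(urcu_gp_ctr));
	else					/* nested: bump count only */
		STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	barrier();	/* promoted to smp_mb() by the signal handler */
}

static inline void rcu_read_unlock(void)
{
	barrier();	/* promoted to smp_mb() by the signal handler */
	STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
#endif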
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4
struct reader_data {
	pthread_t tid;
	long *urcu_active_readers;	/* points to the reader's TLS counter */
};
#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
#ifndef DEBUG_FULL_MB
static int sig_done;	/* incremented by sigurcu_handler on each thread */
#endif
void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	/* Mutex sleeping does not play well with busy-waiting loop. */
	while (pthread_mutex_trylock(&urcu_mutex) != 0)
		cpu_relax();
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
#ifdef DEBUG_FULL_MB
static void force_mb_single_thread(pthread_t tid)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef DEBUG_FULL_MB */
static void force_mb_single_thread(pthread_t tid)
{
	assert(reader_data);
	sig_done = 0;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 * smp_mb();	write sig_done before sending the signal
	 */
	smp_mc();	/* write sig_done before sending the signal */
	pthread_kill(tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on the target thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < 1)
		cpu_relax();
	smp_mb();	/* read sig_done before ending the barrier */
}
static void force_mb_all_threads(void)
{
	struct reader_data *index;

	/*
	 * Ask each thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 * smp_mb();	write sig_done before sending the signals
	 */
	smp_mc();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < num_readers)
		cpu_relax();
	smp_mb();	/* read sig_done before ending the barrier */
}
#endif /* #else #ifdef DEBUG_FULL_MB */
void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		int wait_loops = 0;

		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait too long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index->tid);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
	}
}
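
/*
 * Sketch of the predicate used above (illustrative of urcu.h, not verbatim):
 * a reader still blocks the grace period only if it is inside a read-side
 * critical section (non-zero nesting count) AND its snapshot was taken under
 * the other parity than the current urcu_gp_ctr parity.
 */
#if 0	/* illustrative only, not compiled */
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
#endif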
void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing the data structure
	 * where the new ptr points to. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}
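
/*
 * Writer-side usage sketch (not part of this file): publish a new version,
 * wait for a grace period, then reclaim the old one. `struct foo` and
 * `foo_ptr` are hypothetical; rcu_assign_pointer() is assumed from urcu.h.
 */
#ifdef URCU_USAGE_EXAMPLE
struct foo { int value; };
static struct foo *foo_ptr;

static void foo_update(struct foo *new)
{
	struct foo *old = foo_ptr;

	rcu_assign_pointer(foo_ptr, new);	/* publish the new version */
	synchronize_rcu();	/* wait for all pre-existing readers */
	free(old);		/* safe: no reader can still hold `old` */
}
#endif /* URCU_USAGE_EXAMPLE */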
void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}
/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm, not found. Forgot to register? */
	assert(0);
}
void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}
void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
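
/*
 * Reader-side usage sketch (not part of this file): each reader thread must
 * register before its first read-side critical section and unregister before
 * exiting. `foo_ptr` and use_foo() are hypothetical; rcu_read_lock(),
 * rcu_read_unlock() and rcu_dereference() are assumed from urcu.h.
 */
#ifdef URCU_USAGE_EXAMPLE
static void *reader_thread(void *arg)
{
	struct foo *f;
	int i;

	urcu_register_thread();
	for (i = 0; i < 1000; i++) {
		rcu_read_lock();
		f = rcu_dereference(foo_ptr);
		if (f)
			use_foo(f);	/* hypothetical read-only access */
		rcu_read_unlock();
	}
	urcu_unregister_thread();
	return NULL;
}
#endif /* URCU_USAGE_EXAMPLE */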
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	atomic_inc(&sig_done);
}
void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* use the three-argument handler */
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}
void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
}
#endif /* #ifndef DEBUG_FULL_MB */