/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>

#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * Align the tail on cache line size to eliminate false-sharing
	 * with head. Small note, however: the "qlen" field, kept for
	 * debugging, will cause false-sharing between enqueue and
	 * dequeue.
	 */
	struct cds_wfcq_tail cbs_tail;
	/* Alignment on cache line size will add padding here */
	struct cds_wfcq_head __attribute__((aligned(CAA_CACHE_LINE_SIZE))) cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is default. */

static struct call_rcu_data *default_call_rcu_data;
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is a RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as a
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */
static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;
static void maxcpus_reset(void)
{
	maxcpus = 0;
}
/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0)
		return;
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
	return -1;
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif
static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}
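
/*
 * Illustration (a sketch, not part of the library): the futex handshake
 * between call_rcu_wait() and call_rcu_wake_up() leans on the barriers
 * above. The waiter publishes futex == -1 before re-checking the queue,
 * and the waker enqueues before reading the futex, so at least one side
 * always observes the other's update:
 *
 *	waiter (call_rcu thread)	waker (call_rcu() caller)
 *	------------------------	-------------------------
 *	uatomic_dec(&futex); // -1	enqueue callback
 *	cmm_smp_mb();			cmm_smp_mb();
 *	re-check queue; if empty:	if (futex == -1)
 *	  futex_async(FUTEX_WAIT, -1)	  futex = 0; FUTEX_WAKE
 *
 * If the waker's store of 0 beats the FUTEX_WAIT call, the kernel sees
 * futex != -1 and the wait returns immediately; no wakeup is lost.
 */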
/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
	int ret;

	ret = set_thread_cpu_affinity(crdp);
	if (ret)
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		__cds_wfcq_splice_blocking(&cbs_tmp_head, &cbs_tmp_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		if (!cds_wfcq_empty(&cbs_tmp_head, &cbs_tmp_tail)) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified.  Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb(); /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
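
/*
 * Example (a sketch, not part of the library): per the comment above,
 * a caller protects both the lookup and the use of the returned
 * pointer with an RCU read-side lock:
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(sched_getcpu());
 *	if (crdp)
 *		tid = get_call_rcu_thread(crdp);
 *	rcu_read_unlock();
 */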
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.  Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread.  Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure.  If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */

struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}
/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.  (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
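
/*
 * Example (a sketch, not part of the library): switching the current
 * thread to a dedicated real-time call_rcu worker and back, as the
 * comment above suggests:
 *
 *	struct call_rcu_data *crdp, *old;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	old = get_thread_call_rcu_data();
 *	set_thread_call_rcu_data(crdp);
 *	...
 *	set_thread_call_rcu_data(old);
 *	call_rcu_data_free(crdp);
 */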
/*
 * Create a separate call_rcu thread for each CPU.  This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by other thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
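
/*
 * Example (a sketch, not part of the library): an application that
 * wants per-CPU callback-invocation threads from the start typically
 * calls this once at initialization time:
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *	...
 *	free_all_cpu_call_rcu_data();	// at teardown
 */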
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */

static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one.  So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first.  One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfcq_node_init(&head->next);
	head->func = func;
	/* Holding rcu read-side lock across use of per-cpu crdp */
	rcu_read_lock();
	crdp = get_call_rcu_data();
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
	rcu_read_unlock();
}
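
/*
 * Example (a sketch, not part of the library): callers embed the
 * struct rcu_head in their own structure and recover it with
 * caa_container_of() in the callback. "struct foo" and free_foo()
 * are hypothetical user code:
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 *	// After unpublishing p from all RCU-visible structures:
 *	call_rcu(&p->rcu, free_foo);
 */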
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread.  The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage.  For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks.  Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else.  The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers.  This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}
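
/*
 * Example (a sketch, not part of the library): the documented protocol
 * for retiring a per-CPU call_rcu thread -- unpublish, wait a grace
 * period, then free:
 *
 *	struct call_rcu_data *crdp = get_cpu_call_rcu_data(cpu);
 *
 *	set_cpu_call_rcu_data(cpu, NULL);
 *	synchronize_rcu();	// wait for call_rcu() readers of crdp
 *	call_rcu_data_free(crdp);
 */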
/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	call_rcu_lock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child.  Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	call_rcu_unlock(&call_rcu_mutex);
}
/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec().  Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/* Dispose of all of the rest of the call_rcu_data structures. */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
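
/*
 * Example (a sketch, not part of the library): wiring the three fork
 * handlers up with pthread_atfork() so that fork() without exec() is
 * safe in applications using call_rcu():
 *
 *	pthread_atfork(call_rcu_before_fork,
 *		       call_rcu_after_fork_parent,
 *		       call_rcu_after_fork_child);
 */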