/*
 * Userspace RCU library - Userspace workqueues
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Copyright (c) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <pthread.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"

#include "workqueue.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

/* Data structure that identifies a workqueue. */

struct urcu_workqueue {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because workqueue threads use batching ("splice") to
	 * get an entire list of callbacks, which effectively empties
	 * the queue, and requires to touch the tail anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen;		/* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long loop_count;
	void *priv;
	void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct urcu_workqueue_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct urcu_workqueue_completion_work {
	struct urcu_work work;
	struct urcu_workqueue_completion *completion;
};

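/*
 * Overview of the completion mechanism (a summary of the code below,
 * not additional API): a completion is a reference-counted barrier.
 * Each urcu_workqueue_queue_completion() call increments barrier_count
 * and enqueues one completion work item. When the worker executes it,
 * _urcu_workqueue_wait_complete() decrements barrier_count and, once
 * it reaches zero, wakes any thread blocked in
 * urcu_workqueue_wait_completion().
 */
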
/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	cpu_set_t mask;
	int ret;

	if (workqueue->cpu_affinity < 0)
		return 0;
	if (++workqueue->loop_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == workqueue->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(workqueue->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else /* #if HAVE_SCHED_SETAFFINITY */
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	return 0;
}
#endif

static void futex_wait(int32_t *futex)
{
	/* Read condition before read futex */
	cmm_smp_mb();
	if (uatomic_read(futex) != -1)
		return;
	while (futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void futex_wake_up(int32_t *futex)
{
	/* Write to condition before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(futex) == -1)) {
		uatomic_set(futex, 0);
		if (futex_async(futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

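/*
 * Futex protocol used by the pair above (description only, inferred
 * from the code in this file): a waiter publishes its intent by moving
 * the futex word from 0 to -1 with uatomic_dec(), re-checks its wakeup
 * condition, then sleeps in FUTEX_WAIT while the word is still -1. A
 * waker first writes the condition, and only if it observes -1 does it
 * reset the word to 0 and issue FUTEX_WAKE. The cmm_smp_mb() calls
 * order the condition accesses against the futex word accesses on both
 * sides.
 */
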
/* This is the code run by each worker thread. */

static void *workqueue_thread(void *arg)
{
	unsigned long cbcount;
	struct urcu_workqueue *workqueue = (struct urcu_workqueue *) arg;
	int rt = !!(uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_RT);

	if (set_thread_cpu_affinity(workqueue))
		urcu_die(errno);

	if (workqueue->initialize_worker_fct)
		workqueue->initialize_worker_fct(workqueue, workqueue->priv);

	if (!rt) {
		uatomic_dec(&workqueue->futex);
		/* Decrement futex before reading workqueue */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(workqueue))
			urcu_die(errno);

		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			if (workqueue->worker_before_pause_fct)
				workqueue->worker_before_pause_fct(workqueue,
					workqueue->priv);
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSED);
			while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			if (workqueue->worker_after_resume_fct)
				workqueue->worker_after_resume_fct(workqueue,
					workqueue->priv);
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &workqueue->cbs_head, &workqueue->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			if (workqueue->grace_period_fct)
				workqueue->grace_period_fct(workqueue, workqueue->priv);
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct urcu_work *uwp;

				uwp = caa_container_of(cbs,
					struct urcu_work, next);
				uwp->func(uwp);
				cbcount++;
			}
			uatomic_sub(&workqueue->qlen, cbcount);
		}
		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_STOP)
			break;
		if (workqueue->worker_before_wait_fct)
			workqueue->worker_before_wait_fct(workqueue, workqueue->priv);
		if (!rt) {
			if (cds_wfcq_empty(&workqueue->cbs_head,
					&workqueue->cbs_tail)) {
				futex_wait(&workqueue->futex);
				uatomic_dec(&workqueue->futex);
				/*
				 * Decrement futex before reading
				 * urcu_work list.
				 */
				cmm_smp_mb();
			}
		} else {
			if (cds_wfcq_empty(&workqueue->cbs_head,
					&workqueue->cbs_tail)) {
				(void) poll(NULL, 0, 10);
			}
		}
		if (workqueue->worker_after_wake_up_fct)
			workqueue->worker_after_wake_up_fct(workqueue, workqueue->priv);
	}
	if (!rt) {
		/*
		 * Read urcu_work list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&workqueue->futex, 0);
	}
	if (workqueue->finalize_worker_fct)
		workqueue->finalize_worker_fct(workqueue, workqueue->priv);
	return NULL;
}

struct urcu_workqueue *urcu_workqueue_create(unsigned long flags,
		int cpu_affinity, void *priv,
		void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv))
{
	struct urcu_workqueue *workqueue;
	int ret;

	workqueue = malloc(sizeof(*workqueue));
	if (workqueue == NULL)
		urcu_die(errno);
	memset(workqueue, '\0', sizeof(*workqueue));
	cds_wfcq_init(&workqueue->cbs_head, &workqueue->cbs_tail);
	workqueue->qlen = 0;
	workqueue->futex = 0;
	workqueue->flags = flags;
	workqueue->priv = priv;
	workqueue->grace_period_fct = grace_period_fct;
	workqueue->initialize_worker_fct = initialize_worker_fct;
	workqueue->finalize_worker_fct = finalize_worker_fct;
	workqueue->worker_before_wait_fct = worker_before_wait_fct;
	workqueue->worker_after_wake_up_fct = worker_after_wake_up_fct;
	workqueue->worker_before_pause_fct = worker_before_pause_fct;
	workqueue->worker_after_resume_fct = worker_after_resume_fct;
	workqueue->cpu_affinity = cpu_affinity;
	workqueue->loop_count = 0;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret)
		urcu_die(ret);
	return workqueue;
}

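/*
 * Illustrative sketch only (hypothetical caller-side code, not part of
 * this file): a minimal caller can create a workqueue with no hooks
 * and no CPU affinity.
 *
 *	struct urcu_workqueue *wq;
 *
 *	wq = urcu_workqueue_create(0, -1, NULL,
 *		NULL, NULL, NULL, NULL,
 *		NULL, NULL, NULL);
 *	...
 *	urcu_workqueue_destroy(wq);
 *
 * A negative cpu_affinity disables the retry logic in
 * set_thread_cpu_affinity() above; flags may include URCU_WORKQUEUE_RT
 * to make the worker poll instead of blocking on the futex.
 */
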
static void wake_worker_thread(struct urcu_workqueue *workqueue)
{
	if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_WORKQUEUE_RT))
		futex_wake_up(&workqueue->futex);
}

static int urcu_workqueue_destroy_worker(struct urcu_workqueue *workqueue)
{
	int ret;
	void *retval;

	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_STOP);
	wake_worker_thread(workqueue);

	ret = pthread_join(workqueue->tid, &retval);
	if (ret) {
		urcu_die(ret);
	}
	if (retval != NULL) {
		urcu_die(EINVAL);
	}
	workqueue->flags &= ~URCU_WORKQUEUE_STOP;
	return 0;
}

void urcu_workqueue_destroy(struct urcu_workqueue *workqueue)
{
	if (workqueue == NULL) {
		return;
	}
	if (urcu_workqueue_destroy_worker(workqueue)) {
		urcu_die(errno);
	}
	assert(cds_wfcq_empty(&workqueue->cbs_head, &workqueue->cbs_tail));
	free(workqueue);
}

void urcu_workqueue_queue_work(struct urcu_workqueue *workqueue,
		struct urcu_work *work,
		void (*func)(struct urcu_work *work))
{
	cds_wfcq_node_init(&work->next);
	work->func = func;
	cds_wfcq_enqueue(&workqueue->cbs_head, &workqueue->cbs_tail, &work->next);
	uatomic_inc(&workqueue->qlen);
	wake_worker_thread(workqueue);
}

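/*
 * Illustrative sketch only (hypothetical caller-side code, not part of
 * this file): callers embed struct urcu_work in their own work item
 * and recover it with caa_container_of() from the work function,
 * mirroring what _urcu_workqueue_wait_complete() does below. The names
 * my_work and do_something() are placeholders.
 *
 *	struct my_work {
 *		struct urcu_work work;
 *		int payload;
 *	};
 *
 *	static void my_work_fct(struct urcu_work *work)
 *	{
 *		struct my_work *w = caa_container_of(work,
 *			struct my_work, work);
 *
 *		do_something(w->payload);
 *		free(w);
 *	}
 *
 *	...
 *	urcu_workqueue_queue_work(wq, &w->work, my_work_fct);
 */
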
static void free_completion(struct urcu_ref *ref)
{
	struct urcu_workqueue_completion *completion;

	completion = caa_container_of(ref, struct urcu_workqueue_completion, ref);
	free(completion);
}

static void _urcu_workqueue_wait_complete(struct urcu_work *work)
{
	struct urcu_workqueue_completion_work *completion_work;
	struct urcu_workqueue_completion *completion;

	completion_work = caa_container_of(work,
		struct urcu_workqueue_completion_work, work);
	completion = completion_work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		futex_wake_up(&completion->futex);
	urcu_ref_put(&completion->ref, free_completion);
	free(completion_work);
}

struct urcu_workqueue_completion *urcu_workqueue_create_completion(void)
{
	struct urcu_workqueue_completion *completion;

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);
	urcu_ref_set(&completion->ref, 1);
	completion->barrier_count = 0;
	return completion;
}

void urcu_workqueue_destroy_completion(struct urcu_workqueue_completion *completion)
{
	urcu_ref_put(&completion->ref, free_completion);
}

void urcu_workqueue_wait_completion(struct urcu_workqueue_completion *completion)
{
	/* Wait until all queued completion works have executed. */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		futex_wait(&completion->futex);
	}
}

void urcu_workqueue_queue_completion(struct urcu_workqueue *workqueue,
		struct urcu_workqueue_completion *completion)
{
	struct urcu_workqueue_completion_work *work;

	work = calloc(sizeof(*work), 1);
	if (!work)
		urcu_die(errno);
	work->completion = completion;
	urcu_ref_get(&completion->ref);
	uatomic_inc(&completion->barrier_count);
	urcu_workqueue_queue_work(workqueue, &work->work, _urcu_workqueue_wait_complete);
}

/*
 * Wait for all in-flight work to complete execution.
 */
void urcu_workqueue_flush_queued_work(struct urcu_workqueue *workqueue)
{
	struct urcu_workqueue_completion *completion;

	completion = urcu_workqueue_create_completion();
	if (!completion)
		urcu_die(errno);
	urcu_workqueue_queue_completion(workqueue, completion);
	urcu_workqueue_wait_completion(completion);
	urcu_workqueue_destroy_completion(completion);
}

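/*
 * Illustrative sketch only (hypothetical caller-side code): since
 * barrier_count is incremented once per urcu_workqueue_queue_completion()
 * call, a single completion can act as a barrier across several
 * workqueues by queueing it on each of them before waiting.
 *
 *	struct urcu_workqueue_completion *completion;
 *
 *	completion = urcu_workqueue_create_completion();
 *	urcu_workqueue_queue_completion(wq_a, completion);
 *	urcu_workqueue_queue_completion(wq_b, completion);
 *	urcu_workqueue_wait_completion(completion);
 *	urcu_workqueue_destroy_completion(completion);
 */
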
/* To be used in before fork handler. */
void urcu_workqueue_pause_worker(struct urcu_workqueue *workqueue)
{
	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSE);
	cmm_smp_mb__after_uatomic_or();
	wake_worker_thread(workqueue);

	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) == 0)
		(void) poll(NULL, 0, 1);
}

/* To be used in after fork parent handler. */
void urcu_workqueue_resume_worker(struct urcu_workqueue *workqueue)
{
	uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSE);
	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) != 0)
		(void) poll(NULL, 0, 1);
}

void urcu_workqueue_create_worker(struct urcu_workqueue *workqueue)
{
	int ret;

	/* Clear workqueue state from parent. */
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSED;
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSE;
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret) {
		urcu_die(ret);
	}
}

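/*
 * Illustrative sketch only (hypothetical caller-side code): the three
 * fork helpers above are meant to be wired into pthread_atfork(), so
 * the worker is quiescent across fork() and recreated in the child,
 * where the parent's worker thread no longer exists.
 *
 *	static struct urcu_workqueue *wq;	(hypothetical global)
 *
 *	static void before_fork(void)
 *	{
 *		urcu_workqueue_pause_worker(wq);
 *	}
 *
 *	static void after_fork_parent(void)
 *	{
 *		urcu_workqueue_resume_worker(wq);
 *	}
 *
 *	static void after_fork_child(void)
 *	{
 *		urcu_workqueue_create_worker(wq);
 *	}
 *
 *	...
 *	pthread_atfork(before_fork, after_fork_parent, after_fork_child);
 */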