Fix: tests: invoke destroy APIs for queues/stacks
[deliverable/userspace-rcu.git] / urcu-wait.h
#ifndef _URCU_WAIT_H
#define _URCU_WAIT_H

/*
 * urcu-wait.h
 *
 * Userspace RCU library wait/wakeup management
 *
 * Copyright (c) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/uatomic.h>
#include <urcu/wfstack.h>
#include "urcu-die.h"
#include "urcu/futex.h"	/* futex_noasync(), FUTEX_WAIT, FUTEX_WAKE */

/* Standard headers needed by the inline functions below. */
#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Number of busy-loop attempts before waiting on futex for grace period
 * batching.
 */
#define URCU_WAIT_ATTEMPTS 1000

enum urcu_wait_state {
	/* URCU_WAIT_WAITING is compared directly (futex compares it). */
	URCU_WAIT_WAITING = 0,
	/* non-zero are used as masks. */
	URCU_WAIT_WAKEUP = (1 << 0),
	URCU_WAIT_RUNNING = (1 << 1),
	URCU_WAIT_TEARDOWN = (1 << 2),
};

struct urcu_wait_node {
	struct cds_wfs_node node;
	int32_t state;	/* enum urcu_wait_state */
};

#define URCU_WAIT_NODE_INIT(name, _state)	\
	{ .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state)	\
	struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name)		\
	struct urcu_wait_node name

struct urcu_wait_queue {
	struct cds_wfs_stack stack;
};

#define URCU_WAIT_QUEUE_HEAD_INIT(name)		\
	{ .stack.head = CDS_WFS_END, .stack.lock = PTHREAD_MUTEX_INITIALIZER }

#define DECLARE_URCU_WAIT_QUEUE(name)		\
	struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name)		\
	struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)

struct urcu_waiters {
	struct cds_wfs_head *head;
};

/*
 * Add ourselves atomically to a wait queue. Return 0 if the queue was
 * previously empty, else return 1.
 * A full memory barrier is issued before being added to the wait queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	return cds_wfs_push(&queue->stack, &node->node);
}

/*
 * Atomically move all waiters from wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
		struct urcu_wait_queue *queue)
{
	waiters->head = __cds_wfs_pop_all(&queue->stack);
}

static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	node->state = state;
}

static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	urcu_wait_set_state(node, state);
	cds_wfs_node_init(&node->node);
}

/*
 * Note: urcu_adaptative_wake_up needs its "wait" argument to stay
 * allocated throughout its execution. In this scheme, the waiter owns
 * the node memory, and we only allow it to free this memory when it
 * receives the URCU_WAIT_TEARDOWN flag.
 */
static inline
void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
	cmm_smp_mb();
	assert(uatomic_read(&wait->state) == URCU_WAIT_WAITING);
	uatomic_set(&wait->state, URCU_WAIT_WAKEUP);
	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
		if (futex_noasync(&wait->state, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
	/* Allow teardown of struct urcu_wait memory. */
	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
}

/*
 * Caller must initialize the wait node state to URCU_WAIT_WAITING before
 * passing its memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
	unsigned int i;

	/* Load and test condition before reading state */
	cmm_smp_rmb();
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
			goto skip_futex_wait;
		caa_cpu_relax();
	}
	while (futex_noasync(&wait->state, FUTEX_WAIT, URCU_WAIT_WAITING,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			goto skip_futex_wait;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
skip_futex_wait:

	/* Tell waker thread that we are running. */
	uatomic_or(&wait->state, URCU_WAIT_RUNNING);

	/*
	 * Wait until waker thread lets us know it's ok to tear down
	 * memory allocated for struct urcu_wait.
	 */
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
			break;
		caa_cpu_relax();
	}
	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
		poll(NULL, 0, 10);
	assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}

static inline
void urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
	struct cds_wfs_node *iter, *iter_n;

	/* Wake all waiters in our stack head */
	cds_wfs_for_each_blocking_safe(waiters->head, iter, iter_n) {
		struct urcu_wait_node *wait_node =
			caa_container_of(iter, struct urcu_wait_node, node);

		/* Don't wake already running threads */
		if (wait_node->state & URCU_WAIT_RUNNING)
			continue;
		urcu_adaptative_wake_up(wait_node);
	}
}

#endif /* _URCU_WAIT_H */
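
To make the intended pairing concrete, here is a minimal usage sketch (not part of urcu-wait.h): one thread enqueues itself as a waiter and waits adaptively, while another thread drains the queue and wakes everyone. The queue name my_wait_queue and the two example functions are hypothetical; the sketch assumes this header and its dependencies are on the include path.

#include "urcu-wait.h"

/* Hypothetical wait queue shared by the waiter and waker sides. */
static DEFINE_URCU_WAIT_QUEUE(my_wait_queue);

/* Waiter side: publish a stack-allocated wait node, then wait adaptively. */
static void example_waiter(void)
{
	struct urcu_wait_node wait;

	/* State must be URCU_WAIT_WAITING before the node becomes visible. */
	urcu_wait_node_init(&wait, URCU_WAIT_WAITING);
	(void) urcu_wait_add(&my_wait_queue, &wait);
	/*
	 * Busy-loop up to URCU_WAIT_ATTEMPTS times, then sleep on the futex.
	 * This returns only once the waker has set URCU_WAIT_TEARDOWN, so
	 * the stack memory of "wait" is safe to reclaim on return.
	 */
	urcu_adaptative_busy_wait(&wait);
}

/* Waker side: atomically grab all queued waiters, then wake each of them. */
static void example_waker(void)
{
	struct urcu_waiters waiters;

	urcu_move_waiters(&waiters, &my_wait_queue);
	urcu_wake_all_waiters(&waiters);
}

In the library code that uses this header, the boolean returned by urcu_wait_add also elects a leader: a caller that finds the queue empty can perform the grace-period work itself instead of waiting, which is how concurrent grace-period requests get batched.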