Commit | Line | Data |
---|---|---|
fdee2e6d MD |
1 | #ifndef _URCU_BP_STATIC_H |
2 | #define _URCU_BP_STATIC_H | |
3 | ||
4 | /* | |
5 | * urcu-bp-static.h | |
6 | * | |
7 | * Userspace RCU header. | |
8 | * | |
9 | * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking | |
10 | * dynamically with the userspace rcu library. | |
11 | * | |
12 | * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | |
13 | * Copyright (c) 2009 Paul E. McKenney, IBM Corporation. | |
14 | * | |
15 | * This library is free software; you can redistribute it and/or | |
16 | * modify it under the terms of the GNU Lesser General Public | |
17 | * License as published by the Free Software Foundation; either | |
18 | * version 2.1 of the License, or (at your option) any later version. | |
19 | * | |
20 | * This library is distributed in the hope that it will be useful, | |
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
23 | * Lesser General Public License for more details. | |
24 | * | |
25 | * You should have received a copy of the GNU Lesser General Public | |
26 | * License along with this library; if not, write to the Free Software | |
27 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
28 | * | |
29 | * IBM's contributions to this file may be relicensed under LGPLv2 or later. | |
30 | */ | |
31 | ||
32 | #include <stdlib.h> | |
33 | #include <pthread.h> | |
34 | #include <syscall.h> | |
35 | #include <unistd.h> | |
36 | ||
37 | #include <urcu/compiler.h> | |
38 | #include <urcu/arch.h> | |
39 | #include <urcu/system.h> | |
40 | #include <urcu/arch_uatomic.h> | |
41 | #include <urcu/list.h> | |
42 | ||
43 | /* | |
44 | * This code section can only be included in LGPL 2.1 compatible source code. | |
45 | * See below for the function call wrappers which can be used in code meant to | |
46 | * be only linked with the Userspace RCU library. This comes with a small | |
47 | * performance degradation on the read-side due to the added function calls. | |
48 | * This is required to permit relinking with newer versions of the library. | |
49 | */ | |
50 | ||
51 | /* | |
52 | * Active attempts to check for reader Q.S. before calling sleep(). | |
53 | */ | |
54 | #define RCU_QS_ACTIVE_ATTEMPTS 100 | |
55 | ||
56 | #ifdef DEBUG_RCU | |
57 | #define rcu_assert(args...) assert(args) | |
58 | #else | |
59 | #define rcu_assert(args...) | |
60 | #endif | |
61 | ||
62 | #ifdef DEBUG_YIELD | |
63 | #include <sched.h> | |
64 | #include <time.h> | |
65 | #include <pthread.h> | |
66 | #include <unistd.h> | |
67 | ||
68 | #define YIELD_READ (1 << 0) | |
69 | #define YIELD_WRITE (1 << 1) | |
70 | ||
71 | /* | |
72 | * Updates without URCU_MB are much slower. Account this in | |
73 | * the delay. | |
74 | */ | |
75 | /* maximum sleep delay, in us */ | |
76 | #define MAX_SLEEP 50 | |
77 | ||
78 | extern unsigned int yield_active; | |
79 | extern unsigned int __thread rand_yield; | |
80 | ||
81 | static inline void debug_yield_read(void) | |
82 | { | |
83 | if (yield_active & YIELD_READ) | |
84 | if (rand_r(&rand_yield) & 0x1) | |
85 | usleep(rand_r(&rand_yield) % MAX_SLEEP); | |
86 | } | |
87 | ||
88 | static inline void debug_yield_write(void) | |
89 | { | |
90 | if (yield_active & YIELD_WRITE) | |
91 | if (rand_r(&rand_yield) & 0x1) | |
92 | usleep(rand_r(&rand_yield) % MAX_SLEEP); | |
93 | } | |
94 | ||
/*
 * Seed the per-thread PRNG state (rand_yield) used by the debug delay
 * helpers. XOR-ing the time with pthread_self() decorrelates threads
 * started within the same second.
 * NOTE(review): assumes pthread_t converts to an arithmetic type, which
 * POSIX does not guarantee (it may be a struct) — confirm on target
 * platforms; DEBUG_YIELD is a test-only build anyway.
 */
static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
99 | #else | |
/* No-op stub: DEBUG_YIELD disabled, so no delay is injected on the read side. */
static inline void debug_yield_read(void)
{
}
103 | ||
/* No-op stub: DEBUG_YIELD disabled, so no delay is injected on the write side. */
static inline void debug_yield_write(void)
{
}
107 | ||
/* No-op stub: DEBUG_YIELD disabled, nothing to seed. */
static inline void debug_yield_init(void)
{

}
112 | #endif | |
113 | ||
114 | /* | |
115 | * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a | |
116 | * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits. | |
117 | */ | |
118 | #define RCU_GP_COUNT (1UL << 0) | |
119 | /* Use the amount of bits equal to half of the architecture long size */ | |
120 | #define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2)) | |
121 | #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1) | |
122 | ||
123 | /* | |
124 | * Used internally by _rcu_read_lock. | |
125 | */ | |
126 | extern void rcu_bp_register(void); | |
127 | ||
128 | /* | |
129 | * Global quiescent period counter with low-order bits unused. | |
130 | * Using an int rather than a char to eliminate false register dependencies | |
131 | * causing stalls on some architectures. | |
132 | */ | |
133 | extern long urcu_gp_ctr; | |
134 | ||
135 | struct urcu_reader { | |
136 | /* Data used by both reader and synchronize_rcu() */ | |
137 | long ctr; | |
138 | /* Data used for registry */ | |
139 | struct list_head head __attribute__((aligned(CACHE_LINE_SIZE))); | |
140 | pthread_t tid; | |
141 | int alloc; /* registry entry allocated */ | |
142 | }; | |
143 | ||
144 | /* | |
145 | * Bulletproof version keeps a pointer to a registry not part of the TLS. | |
146 | * Adds a pointer dereference on the read-side, but won't require to unregister | |
147 | * the reader thread. | |
148 | */ | |
149 | extern struct urcu_reader __thread *urcu_reader; | |
150 | ||
151 | static inline int rcu_old_gp_ongoing(long *value) | |
152 | { | |
153 | long v; | |
154 | ||
155 | if (value == NULL) | |
156 | return 0; | |
157 | /* | |
158 | * Make sure both tests below are done on the same version of *value | |
159 | * to insure consistency. | |
160 | */ | |
161 | v = LOAD_SHARED(*value); | |
162 | return (v & RCU_GP_CTR_NEST_MASK) && | |
163 | ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT); | |
164 | } | |
165 | ||
/*
 * Enter an RCU read-side critical section (LGPL-only fast path).
 * Bulletproof flavor: lazily registers the calling thread on first use,
 * so callers never need an explicit registration step.
 */
static inline void _rcu_read_lock(void)
{
	long tmp;

	/* Check if registered */
	if (unlikely(!urcu_reader))
		rcu_bp_register();

	/* Plain read is fine: only this thread writes its own ctr. */
	tmp = urcu_reader->ctr;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		/* Outermost nesting: snapshot the global grace period counter. */
		_STORE_SHARED(urcu_reader->ctr, _LOAD_SHARED(urcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer.
		 */
		smp_mb();
	} else {
		/* Nested: bump only the low-order nesting count. */
		_STORE_SHARED(urcu_reader->ctr, tmp + RCU_GP_COUNT);
	}
}
187 | ||
/*
 * Exit an RCU read-side critical section: drop one nesting level by
 * subtracting RCU_GP_COUNT from this thread's reader counter.
 */
static inline void _rcu_read_unlock(void)
{
	/*
	 * Finish using rcu before decrementing the pointer.
	 * The barrier must precede the counter store so prior reads of
	 * RCU-protected data cannot be reordered past the unlock.
	 */
	smp_mb();
	_STORE_SHARED(urcu_reader->ctr, urcu_reader->ctr - RCU_GP_COUNT);
}
196 | ||
197 | #endif /* _URCU_BP_STATIC_H */ |