/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *         http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

module_param(rcu_expedited, int, 0);

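/*
 * A note on the zero perm value above (a summary of module_param()
 * semantics, not code from this file): perm 0 means no sysfs entry is
 * created, but the parameter can still be set at boot time, e.g.
 * "rcupdate.rcu_expedited=1" on the kernel command line.
 */
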
#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1) {
                --t->rcu_read_lock_nesting;
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
#ifdef CONFIG_PROVE_RCU_DELAY
                udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

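/*
 * Illustrative sketch of the reader-side pairing that ends up in the two
 * functions above.  The structure, global pointer, and accessor here are
 * hypothetical, not part of this file; gp would be published elsewhere
 * via rcu_assign_pointer().
 */
#if 0	/* example only */
struct foo {
        int a;
};
static struct foo __rcu *gp;

static int read_foo_a(void)
{
        struct foo *p;
        int ret = -1;

        rcu_read_lock();                /* enters via __rcu_read_lock() */
        p = rcu_dereference(gp);        /* p stays valid until the unlock */
        if (p)
                ret = p->a;
        rcu_read_unlock();              /* may run rcu_read_unlock_special() */
        return ret;
}
#endif
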
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (likely(list_empty(&current->rcu_node_entry)))
                return;
        t->rcu_read_lock_nesting = 1;
        barrier();
        t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
        __rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (rcu_is_cpu_idle())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

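/*
 * Illustrative sketch: the canonical consumer of rcu_read_lock_bh_held()
 * is a lockdep-checked dereference.  The helper and its argument are
 * hypothetical, not part of this file.
 */
#if 0	/* example only */
static struct foo *fetch_foo(struct foo __rcu **gpp)
{
        /* Complains (under lockdep) unless BH is disabled at this point. */
        return rcu_dereference_check(*gpp, rcu_read_lock_bh_held());
}
#endif
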
struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        crf(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);

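/*
 * Illustrative sketch: wait_rcu_gp() turns any call_rcu()-style
 * callback-posting function into a synchronous grace-period wait, so a
 * synchronize_rcu()-like wrapper (hypothetical here; the real ones live
 * with the individual RCU flavors) reduces to a one-liner:
 */
#if 0	/* example only */
static void my_synchronize_rcu(void)
{
        wait_rcu_gp(call_rcu);  /* returns after a full grace period */
}
#endif
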
#ifdef CONFIG_PROVE_RCU
/*
 * wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
        return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                /*
                 * Ensure that queued callbacks are all executed.
                 * If we detect that we are nested in an RCU read-side
                 * critical section, we should simply fail; otherwise we
                 * would deadlock.  In !PREEMPT configurations, there is
                 * no way to tell whether we are in an RCU read-side
                 * critical section, so we never attempt any fixup and
                 * just print a warning.
                 */
#ifndef CONFIG_PREEMPT
                WARN_ON_ONCE(1);
                return 0;
#endif
                if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
                    irqs_disabled()) {
                        WARN_ON_ONCE(1);
                        return 0;
                }
                rcu_barrier();
                rcu_barrier_sched();
                rcu_barrier_bh();
                debug_object_init(head, &rcuhead_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup.  We just make sure that it is
                 * tracked in the object tracker.
                 */
                debug_object_init(head, &rcuhead_debug_descr);
                debug_object_activate(head, &rcuhead_debug_descr);
                return 0;

        case ODEBUG_STATE_ACTIVE:
                /*
                 * Ensure that queued callbacks are all executed.
                 * If we detect that we are nested in an RCU read-side
                 * critical section, we should simply fail; otherwise we
                 * would deadlock.  In !PREEMPT configurations, there is
                 * no way to tell whether we are in an RCU read-side
                 * critical section, so we never attempt any fixup and
                 * just print a warning.
                 */
#ifndef CONFIG_PREEMPT
                WARN_ON_ONCE(1);
                return 0;
#endif
                if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
                    irqs_disabled()) {
                        WARN_ON_ONCE(1);
                        return 0;
                }
                rcu_barrier();
                rcu_barrier_sched();
                rcu_barrier_bh();
                debug_object_activate(head, &rcuhead_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                /*
                 * Ensure that queued callbacks are all executed.
                 * If we detect that we are nested in an RCU read-side
                 * critical section, we should simply fail; otherwise we
                 * would deadlock.  In !PREEMPT configurations, there is
                 * no way to tell whether we are in an RCU read-side
                 * critical section, so we never attempt any fixup and
                 * just print a warning.
                 */
#ifndef CONFIG_PREEMPT
                WARN_ON_ONCE(1);
                return 0;
#endif
                if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
                    irqs_disabled()) {
                        WARN_ON_ONCE(1);
                        return 0;
                }
                rcu_barrier();
                rcu_barrier_sched();
                rcu_barrier_bh();
                debug_object_free(head, &rcuhead_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .fixup_init = rcuhead_fixup_init,
        .fixup_activate = rcuhead_fixup_activate,
        .fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
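
/*
 * Illustrative sketch of the on-stack pattern that init_rcu_head_on_stack()
 * and destroy_rcu_head_on_stack() support; wait_rcu_gp() above is this
 * file's real user.  The callback and waiter below are hypothetical.
 */
#if 0	/* example only */
static void example_cb(struct rcu_head *head)
{
        /* Runs after a grace period; head points into example_wait()'s stack. */
}

static void example_wait(void)
{
        struct rcu_head rh;

        init_rcu_head_on_stack(&rh);    /* tell debugobjects: on-stack object */
        call_rcu(&rh, example_cb);
        rcu_barrier();                  /* ensure example_cb has run */
        destroy_rcu_head_on_stack(&rh); /* rh is about to go out of scope */
}
#endif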

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA 0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

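/*
 * Illustrative arithmetic, assuming the default CONFIG_RCU_CPU_STALL_TIMEOUT
 * of 21 seconds: 21 lies within the [3, 300] clamp above, so with
 * CONFIG_PROVE_RCU=y the function returns 21 * HZ + 5 * HZ jiffies and a
 * stall is reported after 26 seconds; without CONFIG_PROVE_RCU, after 21.
 */
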
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */