/* kernel/rcu/tiny.c */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

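/*
 * rcu_dynticks_nesting tracks how far this CPU is from the idle
 * extended quiescent state: zero means RCU is not watching the CPU.
 * Roughly speaking (see the DYNTICK_TASK_* definitions in rcu.h for
 * the exact encoding), interrupt entry and exit adjust the count by
 * one, while process-level idle exit and entry adjust it by
 * DYNTICK_TASK_NEST_VALUE.
 */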
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "tiny_plugin.h"

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
        if (newval) {
                RCU_TRACE(trace_rcu_dyntick(TPS("--="),
                                            rcu_dynticks_nesting, newval));
                rcu_dynticks_nesting = newval;
                return;
        }
        RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
                                    rcu_dynticks_nesting, newval));
        if (!is_idle_task(current)) {
                struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
                                            rcu_dynticks_nesting, newval));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
        barrier();
        rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
        if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
            DYNTICK_TASK_NEST_VALUE)
                newval = 0;
        else
                newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        newval = rcu_dynticks_nesting - 1;
        WARN_ON_ONCE(newval < 0);
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick(TPS("++="),
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
                rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        else
                rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
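
/*
 * Illustrative call sequence (a sketch, not compiled code) showing how
 * rcu_dynticks_nesting evolves when the idle loop takes an interrupt,
 * assuming the usual idle-loop and irq-entry hooks:
 *
 *	rcu_idle_enter();	nesting drops to zero, RCU stops watching
 *	...			CPU is idle
 *	rcu_irq_enter();	nesting 0 -> 1, RCU watching again
 *	...			handler may use RCU read-side primitives
 *	rcu_irq_exit();		nesting 1 -> 0, RCU stops watching
 *	...			back to the idle loop
 *	rcu_idle_exit();	nesting becomes DYNTICK_TASK_EXIT_IDLE
 */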

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU is watching the current CPU, that is, whether RCU
 * considers this CPU to be non-idle (a nonzero return means watching).
 */
bool notrace __rcu_is_watching(void)
{
        return rcu_dynticks_nesting;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
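 *
 * Because rcu_irq_enter() added one to rcu_dynticks_nesting for the
 * interrupt in question, a value of one or less here means that the
 * interrupt arrived while the CPU was idle.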
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 1;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  The callers
 * disable irqs, which avoids confusion due to interrupt handlers
 * invoking call_rcu().  Returns 1 if ->donetail was advanced, in which
 * case RCU_SOFTIRQ should be raised to invoke the newly ready callbacks.
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        RCU_TRACE(reset_cpu_stall_ticks(rcp));
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        RCU_TRACE(check_cpu_stalls());
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
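/*
 * Informal sketch of the callback-list layout used below (struct
 * rcu_ctrlblk itself is not defined in this file): ->rcucblist heads a
 * singly linked list of rcu_head structures, ->donetail points to the
 * ->next pointer of the last callback whose grace period has elapsed,
 * and ->curtail points to the ->next pointer of the last callback on
 * the list.  Splicing off everything up to *->donetail therefore yields
 * exactly the callbacks that are ready to invoke.
 */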
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        const char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              !!ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
                                              is_idle_task(current),
                                              false));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name,
                                      cb_count, 0, need_resched(),
                                      is_idle_task(current),
                                      false));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
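
/*
 * Illustrative usage of call_rcu_sched() (a sketch, not part of this
 * file's code; the structure, field, and callback names below are made
 * up): embed an rcu_head in the RCU-protected structure and free the
 * structure from the callback once a grace period has elapsed.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 * Then, after unlinking the old element from all reader-visible paths:
 *
 *	call_rcu_sched(&oldp->rcu, foo_reclaim);
 */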

void rcu_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}