Commit | Line | Data |
---|---|---|
f41d911f PM |
1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | |
3 | * Internal non-public definitions that provide either classic | |
6cc68793 | 4 | * or preemptible semantics. |
f41d911f PM |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
19 | * | |
20 | * Copyright Red Hat, 2009 | |
21 | * Copyright IBM Corporation, 2009 | |
22 | * | |
23 | * Author: Ingo Molnar <mingo@elte.hu> | |
24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | |
25 | */ | |
26 | ||
d9a3da06 | 27 | #include <linux/delay.h> |
f41d911f | 28 | |
5b61b0ba MG |
29 | #define RCU_KTHREAD_PRIO 1 |
30 | ||
31 | #ifdef CONFIG_RCU_BOOST | |
32 | #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO | |
33 | #else | |
34 | #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO | |
35 | #endif | |
36 | ||
26845c28 PM |
37 | /* |
38 | * Check the RCU kernel configuration parameters and print informative | |
39 | * messages about anything out of the ordinary. If you like #ifdef, you | |
40 | * will love this function. | |
41 | */ | |
42 | static void __init rcu_bootup_announce_oddness(void) | |
43 | { | |
44 | #ifdef CONFIG_RCU_TRACE | |
45 | printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n"); | |
46 | #endif | |
47 | #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) | |
48 | printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n", | |
49 | CONFIG_RCU_FANOUT); | |
50 | #endif | |
51 | #ifdef CONFIG_RCU_FANOUT_EXACT | |
52 | printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n"); | |
53 | #endif | |
54 | #ifdef CONFIG_RCU_FAST_NO_HZ | |
55 | printk(KERN_INFO | |
56 | "\tRCU dyntick-idle grace-period acceleration is enabled.\n"); | |
57 | #endif | |
58 | #ifdef CONFIG_PROVE_RCU | |
59 | printk(KERN_INFO "\tRCU lockdep checking is enabled.\n"); | |
60 | #endif | |
61 | #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE | |
62 | printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); | |
63 | #endif | |
81a294c4 | 64 | #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) |
a858af28 PM |
65 | printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n"); |
66 | #endif | |
67 | #if defined(CONFIG_RCU_CPU_STALL_INFO) | |
68 | printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n"); | |
26845c28 PM |
69 | #endif |
70 | #if NUM_RCU_LVL_4 != 0 | |
71 | printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); | |
72 | #endif | |
73 | } | |
74 | ||
f41d911f PM |
75 | #ifdef CONFIG_TREE_PREEMPT_RCU |
76 | ||
e99033c5 | 77 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt); |
f41d911f | 78 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); |
27f4d280 | 79 | static struct rcu_state *rcu_state = &rcu_preempt_state; |
f41d911f | 80 | |
10f39bb1 | 81 | static void rcu_read_unlock_special(struct task_struct *t); |
d9a3da06 PM |
82 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); |
83 | ||
f41d911f PM |
84 | /* |
85 | * Tell them what RCU they are running. | |
86 | */ | |
0e0fc1c2 | 87 | static void __init rcu_bootup_announce(void) |
f41d911f | 88 | { |
6cc68793 | 89 | printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n"); |
26845c28 | 90 | rcu_bootup_announce_oddness(); |
f41d911f PM |
91 | } |
92 | ||
93 | /* | |
94 | * Return the number of RCU-preempt batches processed thus far | |
95 | * for debug and statistics. | |
96 | */ | |
97 | long rcu_batches_completed_preempt(void) | |
98 | { | |
99 | return rcu_preempt_state.completed; | |
100 | } | |
101 | EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt); | |
102 | ||
103 | /* | |
104 | * Return the number of RCU batches processed thus far for debug & stats. | |
105 | */ | |
106 | long rcu_batches_completed(void) | |
107 | { | |
108 | return rcu_batches_completed_preempt(); | |
109 | } | |
110 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | |
111 | ||
bf66f18e PM |
112 | /* |
113 | * Force a quiescent state for preemptible RCU. | |
114 | */ | |
115 | void rcu_force_quiescent_state(void) | |
116 | { | |
117 | force_quiescent_state(&rcu_preempt_state, 0); | |
118 | } | |
119 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |
120 | ||
f41d911f | 121 | /* |
6cc68793 | 122 | * Record a preemptible-RCU quiescent state for the specified CPU. Note |
f41d911f PM |
123 | * that this just means that the task currently running on the CPU is |
124 | * not in a quiescent state. There might be any number of tasks blocked | |
125 | * while in an RCU read-side critical section. | |
25502a6c PM |
126 | * |
127 | * Unlike the other rcu_*_qs() functions, callers of this function | |
128 | * must disable irqs in order to protect the assignment to | |
129 | * ->rcu_read_unlock_special. | |
f41d911f | 130 | */ |
c3422bea | 131 | static void rcu_preempt_qs(int cpu) |
f41d911f PM |
132 | { |
133 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | |
25502a6c | 134 | |
e4cc1f22 | 135 | rdp->passed_quiesce_gpnum = rdp->gpnum; |
c3422bea | 136 | barrier(); |
e4cc1f22 | 137 | if (rdp->passed_quiesce == 0) |
d4c08f2a | 138 | trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs"); |
e4cc1f22 | 139 | rdp->passed_quiesce = 1; |
25502a6c | 140 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
f41d911f PM |
141 | } |
142 | ||
143 | /* | |
c3422bea PM |
144 | * We have entered the scheduler, and the current task might soon be |
145 | * context-switched away from. If this task is in an RCU read-side | |
146 | * critical section, we will no longer be able to rely on the CPU to | |
12f5f524 PM |
147 | * record that fact, so we enqueue the task on the blkd_tasks list. |
148 | * The task will dequeue itself when it exits the outermost enclosing | |
149 | * RCU read-side critical section. Therefore, the current grace period | |
150 | * cannot be permitted to complete until the blkd_tasks list entries | |
151 | * predating the current grace period drain, in other words, until | |
152 | * rnp->gp_tasks becomes NULL. | |
c3422bea PM |
153 | * |
154 | * Caller must disable preemption. | |
f41d911f | 155 | */ |
616c310e | 156 | void rcu_preempt_note_context_switch(void) |
f41d911f PM |
157 | { |
158 | struct task_struct *t = current; | |
c3422bea | 159 | unsigned long flags; |
f41d911f PM |
160 | struct rcu_data *rdp; |
161 | struct rcu_node *rnp; | |
162 | ||
10f39bb1 | 163 | if (t->rcu_read_lock_nesting > 0 && |
f41d911f PM |
164 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { |
165 | ||
166 | /* Possibly blocking in an RCU read-side critical section. */ | |
616c310e | 167 | rdp = __this_cpu_ptr(rcu_preempt_state.rda); |
f41d911f | 168 | rnp = rdp->mynode; |
1304afb2 | 169 | raw_spin_lock_irqsave(&rnp->lock, flags); |
f41d911f | 170 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; |
86848966 | 171 | t->rcu_blocked_node = rnp; |
f41d911f PM |
172 | |
173 | /* | |
174 | * If this CPU has already checked in, then this task | |
175 | * will hold up the next grace period rather than the | |
176 | * current grace period. Queue the task accordingly. | |
177 | * If the task is queued for the current grace period | |
178 | * (i.e., this CPU has not yet passed through a quiescent | |
179 | * state for the current grace period), then as long | |
180 | * as that task remains queued, the current grace period | |
12f5f524 PM |
181 | * cannot end. Note that there is some uncertainty as |
182 | * to exactly when the current grace period started. | |
183 | * We take a conservative approach, which can result | |
184 | * in unnecessarily waiting on tasks that started very | |
185 | * slightly after the current grace period began. C'est | |
186 | * la vie!!! | |
b0e165c0 PM |
187 | * |
188 | * But first, note that the current CPU must still be | |
189 | * online! | |
f41d911f | 190 | */ |
b0e165c0 | 191 | WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); |
e7d8842e | 192 | WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); |
12f5f524 PM |
193 | if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) { |
194 | list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); | |
195 | rnp->gp_tasks = &t->rcu_node_entry; | |
27f4d280 PM |
196 | #ifdef CONFIG_RCU_BOOST |
197 | if (rnp->boost_tasks != NULL) | |
198 | rnp->boost_tasks = rnp->gp_tasks; | |
199 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
12f5f524 PM |
200 | } else { |
201 | list_add(&t->rcu_node_entry, &rnp->blkd_tasks); | |
202 | if (rnp->qsmask & rdp->grpmask) | |
203 | rnp->gp_tasks = &t->rcu_node_entry; | |
204 | } | |
d4c08f2a PM |
205 | trace_rcu_preempt_task(rdp->rsp->name, |
206 | t->pid, | |
207 | (rnp->qsmask & rdp->grpmask) | |
208 | ? rnp->gpnum | |
209 | : rnp->gpnum + 1); | |
1304afb2 | 210 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
10f39bb1 PM |
211 | } else if (t->rcu_read_lock_nesting < 0 && |
212 | t->rcu_read_unlock_special) { | |
213 | ||
214 | /* | |
215 | * Complete exit from RCU read-side critical section on | |
216 | * behalf of preempted instance of __rcu_read_unlock(). | |
217 | */ | |
218 | rcu_read_unlock_special(t); | |
f41d911f PM |
219 | } |
220 | ||
221 | /* | |
222 | * Either we were not in an RCU read-side critical section to | |
223 | * begin with, or we have now recorded that critical section | |
224 | * globally. Either way, we can now note a quiescent state | |
225 | * for this CPU. Again, if we were in an RCU read-side critical | |
226 | * section, and if that critical section was blocking the current | |
227 | * grace period, then the fact that the task has been enqueued | |
228 | * means that we continue to block the current grace period. | |
229 | */ | |
e7d8842e | 230 | local_irq_save(flags); |
616c310e | 231 | rcu_preempt_qs(smp_processor_id()); |
e7d8842e | 232 | local_irq_restore(flags); |
f41d911f PM |
233 | } |
234 | ||
235 | /* | |
6cc68793 | 236 | * Tree-preemptible RCU implementation for rcu_read_lock(). |
f41d911f PM |
237 | * Just increment ->rcu_read_lock_nesting; shared state will be updated |
238 | * if we block. | |
239 | */ | |
240 | void __rcu_read_lock(void) | |
241 | { | |
80dcf60e | 242 | current->rcu_read_lock_nesting++; |
f41d911f PM |
243 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ |
244 | } | |
245 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | |
246 | ||
fc2219d4 PM |
247 | /* |
248 | * Check for preempted RCU readers blocking the current grace period | |
249 | * for the specified rcu_node structure. If the caller needs a reliable | |
250 | * answer, it must hold the rcu_node's ->lock. | |
251 | */ | |
27f4d280 | 252 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
fc2219d4 | 253 | { |
12f5f524 | 254 | return rnp->gp_tasks != NULL; |
fc2219d4 PM |
255 | } |
256 | ||
b668c9cf PM |
257 | /* |
258 | * Record a quiescent state for all tasks that were previously queued | |
259 | * on the specified rcu_node structure and that were blocking the current | |
260 | * RCU grace period. The caller must hold the specified rnp->lock with | |
261 | * irqs disabled, and this lock is released upon return, but irqs remain | |
262 | * disabled. | |
263 | */ | |
d3f6bad3 | 264 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) |
b668c9cf PM |
265 | __releases(rnp->lock) |
266 | { | |
267 | unsigned long mask; | |
268 | struct rcu_node *rnp_p; | |
269 | ||
27f4d280 | 270 | if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { |
1304afb2 | 271 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
b668c9cf PM |
272 | return; /* Still need more quiescent states! */ |
273 | } | |
274 | ||
275 | rnp_p = rnp->parent; | |
276 | if (rnp_p == NULL) { | |
277 | /* | |
278 | * Either there is only one rcu_node in the tree, | |
279 | * or tasks were kicked up to root rcu_node due to | |
280 | * CPUs going offline. | |
281 | */ | |
d3f6bad3 | 282 | rcu_report_qs_rsp(&rcu_preempt_state, flags); |
b668c9cf PM |
283 | return; |
284 | } | |
285 | ||
286 | /* Report up the rest of the hierarchy. */ | |
287 | mask = rnp->grpmask; | |
1304afb2 PM |
288 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
289 | raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */ | |
d3f6bad3 | 290 | rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); |
b668c9cf PM |
291 | } |
292 | ||
12f5f524 PM |
293 | /* |
294 | * Advance a ->blkd_tasks-list pointer to the next entry, returning | |
295 | * NULL instead if we have reached the end of the list. | |
296 | */ | |
297 | static struct list_head *rcu_next_node_entry(struct task_struct *t, | |
298 | struct rcu_node *rnp) | |
299 | { | |
300 | struct list_head *np; | |
301 | ||
302 | np = t->rcu_node_entry.next; | |
303 | if (np == &rnp->blkd_tasks) | |
304 | np = NULL; | |
305 | return np; | |
306 | } | |
307 | ||
b668c9cf PM |
308 | /* |
309 | * Handle special cases during rcu_read_unlock(), such as needing to | |
310 | * notify RCU core processing or the task having blocked during the RCU | |
311 | * read-side critical section. | |
312 | */ | |
be0e1e21 | 313 | static noinline void rcu_read_unlock_special(struct task_struct *t) |
f41d911f PM |
314 | { |
315 | int empty; | |
d9a3da06 | 316 | int empty_exp; |
389abd48 | 317 | int empty_exp_now; |
f41d911f | 318 | unsigned long flags; |
12f5f524 | 319 | struct list_head *np; |
82e78d80 PM |
320 | #ifdef CONFIG_RCU_BOOST |
321 | struct rt_mutex *rbmp = NULL; | |
322 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
f41d911f PM |
323 | struct rcu_node *rnp; |
324 | int special; | |
325 | ||
326 | /* NMI handlers cannot block and cannot safely manipulate state. */ | |
327 | if (in_nmi()) | |
328 | return; | |
329 | ||
330 | local_irq_save(flags); | |
331 | ||
332 | /* | |
333 | * If RCU core is waiting for this CPU to exit its critical section, | |
334 | * let it know that we have done so. | |
335 | */ | |
336 | special = t->rcu_read_unlock_special; | |
337 | if (special & RCU_READ_UNLOCK_NEED_QS) { | |
c3422bea | 338 | rcu_preempt_qs(smp_processor_id()); |
f41d911f PM |
339 | } |
340 | ||
341 | /* Hardware IRQ handlers cannot block. */ | |
ec433f0c | 342 | if (in_irq() || in_serving_softirq()) { |
f41d911f PM |
343 | local_irq_restore(flags); |
344 | return; | |
345 | } | |
346 | ||
347 | /* Clean up if blocked during RCU read-side critical section. */ | |
348 | if (special & RCU_READ_UNLOCK_BLOCKED) { | |
349 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; | |
350 | ||
dd5d19ba PM |
351 | /* |
352 | * Remove this task from the list it blocked on. The | |
353 | * task can migrate while we acquire the lock, but at | |
354 | * most one time. So at most two passes through loop. | |
355 | */ | |
356 | for (;;) { | |
86848966 | 357 | rnp = t->rcu_blocked_node; |
1304afb2 | 358 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
86848966 | 359 | if (rnp == t->rcu_blocked_node) |
dd5d19ba | 360 | break; |
1304afb2 | 361 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
dd5d19ba | 362 | } |
27f4d280 | 363 | empty = !rcu_preempt_blocked_readers_cgp(rnp); |
d9a3da06 PM |
364 | empty_exp = !rcu_preempted_readers_exp(rnp); |
365 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | |
12f5f524 | 366 | np = rcu_next_node_entry(t, rnp); |
f41d911f | 367 | list_del_init(&t->rcu_node_entry); |
82e78d80 | 368 | t->rcu_blocked_node = NULL; |
d4c08f2a PM |
369 | trace_rcu_unlock_preempted_task("rcu_preempt", |
370 | rnp->gpnum, t->pid); | |
12f5f524 PM |
371 | if (&t->rcu_node_entry == rnp->gp_tasks) |
372 | rnp->gp_tasks = np; | |
373 | if (&t->rcu_node_entry == rnp->exp_tasks) | |
374 | rnp->exp_tasks = np; | |
27f4d280 PM |
375 | #ifdef CONFIG_RCU_BOOST |
376 | if (&t->rcu_node_entry == rnp->boost_tasks) | |
377 | rnp->boost_tasks = np; | |
82e78d80 PM |
378 | /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */ |
379 | if (t->rcu_boost_mutex) { | |
380 | rbmp = t->rcu_boost_mutex; | |
381 | t->rcu_boost_mutex = NULL; | |
7765be2f | 382 | } |
27f4d280 | 383 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
f41d911f PM |
384 | |
385 | /* | |
386 | * If this was the last task on the current list, and if | |
387 | * we aren't waiting on any CPUs, report the quiescent state. | |
389abd48 PM |
388 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, |
389 | * so we must take a snapshot of the expedited state. | |
f41d911f | 390 | */ |
389abd48 | 391 | empty_exp_now = !rcu_preempted_readers_exp(rnp); |
d4c08f2a PM |
392 | if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { |
393 | trace_rcu_quiescent_state_report("preempt_rcu", | |
394 | rnp->gpnum, | |
395 | 0, rnp->qsmask, | |
396 | rnp->level, | |
397 | rnp->grplo, | |
398 | rnp->grphi, | |
399 | !!rnp->gp_tasks); | |
d3f6bad3 | 400 | rcu_report_unblock_qs_rnp(rnp, flags); |
d4c08f2a PM |
401 | } else |
402 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
d9a3da06 | 403 | |
27f4d280 PM |
404 | #ifdef CONFIG_RCU_BOOST |
405 | /* Unboost if we were boosted. */ | |
82e78d80 PM |
406 | if (rbmp) |
407 | rt_mutex_unlock(rbmp); | |
27f4d280 PM |
408 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
409 | ||
d9a3da06 PM |
410 | /* |
411 | * If this was the last task on the expedited lists, | |
412 | * then we need to report up the rcu_node hierarchy. | |
413 | */ | |
389abd48 | 414 | if (!empty_exp && empty_exp_now) |
b40d293e | 415 | rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); |
b668c9cf PM |
416 | } else { |
417 | local_irq_restore(flags); | |
f41d911f | 418 | } |
f41d911f PM |
419 | } |
420 | ||
421 | /* | |
6cc68793 | 422 | * Tree-preemptible RCU implementation for rcu_read_unlock(). |
f41d911f PM |
423 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost |
424 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then | |
425 | * invoke rcu_read_unlock_special() to clean up after a context switch | |
426 | * in an RCU read-side critical section and other special cases. | |
427 | */ | |
428 | void __rcu_read_unlock(void) | |
429 | { | |
430 | struct task_struct *t = current; | |
431 | ||
10f39bb1 PM |
432 | if (t->rcu_read_lock_nesting != 1) |
433 | --t->rcu_read_lock_nesting; | |
434 | else { | |
6206ab9b | 435 | barrier(); /* critical section before exit code. */ |
10f39bb1 PM |
436 | t->rcu_read_lock_nesting = INT_MIN; |
437 | barrier(); /* assign before ->rcu_read_unlock_special load */ | |
be0e1e21 PM |
438 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) |
439 | rcu_read_unlock_special(t); | |
10f39bb1 PM |
440 | barrier(); /* ->rcu_read_unlock_special load before assign */ |
441 | t->rcu_read_lock_nesting = 0; | |
be0e1e21 | 442 | } |
cba8244a | 443 | #ifdef CONFIG_PROVE_LOCKING |
10f39bb1 PM |
444 | { |
445 | int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); | |
446 | ||
447 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); | |
448 | } | |
cba8244a | 449 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
f41d911f PM |
450 | } |
451 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | |
452 | ||
1ed509a2 PM |
453 | #ifdef CONFIG_RCU_CPU_STALL_VERBOSE |
454 | ||
455 | /* | |
456 | * Dump detailed information for all tasks blocking the current RCU | |
457 | * grace period on the specified rcu_node structure. | |
458 | */ | |
459 | static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) | |
460 | { | |
461 | unsigned long flags; | |
1ed509a2 PM |
462 | struct task_struct *t; |
463 | ||
27f4d280 | 464 | if (!rcu_preempt_blocked_readers_cgp(rnp)) |
12f5f524 PM |
465 | return; |
466 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
467 | t = list_entry(rnp->gp_tasks, | |
468 | struct task_struct, rcu_node_entry); | |
469 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) | |
470 | sched_show_task(t); | |
471 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1ed509a2 PM |
472 | } |
473 | ||
474 | /* | |
475 | * Dump detailed information for all tasks blocking the current RCU | |
476 | * grace period. | |
477 | */ | |
478 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
479 | { | |
480 | struct rcu_node *rnp = rcu_get_root(rsp); | |
481 | ||
482 | rcu_print_detail_task_stall_rnp(rnp); | |
483 | rcu_for_each_leaf_node(rsp, rnp) | |
484 | rcu_print_detail_task_stall_rnp(rnp); | |
485 | } | |
486 | ||
487 | #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ | |
488 | ||
489 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
490 | { | |
491 | } | |
492 | ||
493 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ | |
494 | ||
a858af28 PM |
495 | #ifdef CONFIG_RCU_CPU_STALL_INFO |
496 | ||
497 | static void rcu_print_task_stall_begin(struct rcu_node *rnp) | |
498 | { | |
499 | printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):", | |
500 | rnp->level, rnp->grplo, rnp->grphi); | |
501 | } | |
502 | ||
503 | static void rcu_print_task_stall_end(void) | |
504 | { | |
505 | printk(KERN_CONT "\n"); | |
506 | } | |
507 | ||
508 | #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ | |
509 | ||
510 | static void rcu_print_task_stall_begin(struct rcu_node *rnp) | |
511 | { | |
512 | } | |
513 | ||
514 | static void rcu_print_task_stall_end(void) | |
515 | { | |
516 | } | |
517 | ||
518 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */ | |
519 | ||
f41d911f PM |
520 | /* |
521 | * Scan the current list of tasks blocked within RCU read-side critical | |
522 | * sections, printing out the tid of each. | |
523 | */ | |
9bc8b558 | 524 | static int rcu_print_task_stall(struct rcu_node *rnp) |
f41d911f | 525 | { |
f41d911f | 526 | struct task_struct *t; |
9bc8b558 | 527 | int ndetected = 0; |
f41d911f | 528 | |
27f4d280 | 529 | if (!rcu_preempt_blocked_readers_cgp(rnp)) |
9bc8b558 | 530 | return 0; |
a858af28 | 531 | rcu_print_task_stall_begin(rnp); |
12f5f524 PM |
532 | t = list_entry(rnp->gp_tasks, |
533 | struct task_struct, rcu_node_entry); | |
9bc8b558 | 534 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { |
a858af28 | 535 | printk(KERN_CONT " P%d", t->pid); |
9bc8b558 PM |
536 | ndetected++; |
537 | } | |
a858af28 | 538 | rcu_print_task_stall_end(); |
9bc8b558 | 539 | return ndetected; |
f41d911f PM |
540 | } |
541 | ||
53d84e00 PM |
542 | /* |
543 | * Suppress preemptible RCU's CPU stall warnings by pushing the | |
544 | * time of the next stall-warning message comfortably far into the | |
545 | * future. | |
546 | */ | |
547 | static void rcu_preempt_stall_reset(void) | |
548 | { | |
549 | rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; | |
550 | } | |
551 | ||
b0e165c0 PM |
552 | /* |
553 | * Check that the list of blocked tasks for the newly completed grace | |
554 | * period is in fact empty. It is a serious bug to complete a grace | |
555 | * period that still has RCU readers blocked! This function must be | |
556 | * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock | |
557 | * must be held by the caller. | |
12f5f524 PM |
558 | * |
559 | * Also, if there are blocked tasks on the list, they automatically | |
560 | * block the newly created grace period, so set up ->gp_tasks accordingly. | |
b0e165c0 PM |
561 | */ |
562 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |
563 | { | |
27f4d280 | 564 | WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); |
12f5f524 PM |
565 | if (!list_empty(&rnp->blkd_tasks)) |
566 | rnp->gp_tasks = rnp->blkd_tasks.next; | |
28ecd580 | 567 | WARN_ON_ONCE(rnp->qsmask); |
b0e165c0 PM |
568 | } |
569 | ||
33f76148 PM |
570 | #ifdef CONFIG_HOTPLUG_CPU |
571 | ||
dd5d19ba PM |
572 | /* |
573 | * Handle tasklist migration for the case in which all CPUs covered by the | |
574 | * specified rcu_node have gone offline. Move the blocked tasks up to the | |
575 | * root rcu_node. The reason for not just moving them to the immediate | |
576 | * parent is to remove the need for rcu_read_unlock_special() to | |
577 | * make more than two attempts to acquire the target rcu_node's lock. | |
578 | * | |
579 | * Returns non-zero if there were tasks blocking the current RCU grace | |
580 | * period on the specified rcu_node structure, and zero otherwise. | |
581 | * | |
dd5d19ba PM |
584 | * The caller must hold rnp->lock with irqs disabled. |
585 | */ | |
237c80c5 PM |
586 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
587 | struct rcu_node *rnp, | |
588 | struct rcu_data *rdp) | |
dd5d19ba | 589 | { |
dd5d19ba PM |
590 | struct list_head *lp; |
591 | struct list_head *lp_root; | |
d9a3da06 | 592 | int retval = 0; |
dd5d19ba | 593 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
12f5f524 | 594 | struct task_struct *t; |
dd5d19ba | 595 | |
86848966 PM |
596 | if (rnp == rnp_root) { |
597 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | |
237c80c5 | 598 | return 0; /* Shouldn't happen: at least one CPU online. */ |
86848966 | 599 | } |
12f5f524 PM |
600 | |
601 | /* If we are on an internal node, complain bitterly. */ | |
602 | WARN_ON_ONCE(rnp != rdp->mynode); | |
dd5d19ba PM |
603 | |
604 | /* | |
12f5f524 PM |
605 | * Move tasks up to root rcu_node. Don't try to get fancy for |
606 | * this corner-case operation -- just put this node's tasks | |
607 | * at the head of the root node's list, and update the root node's | |
608 | * ->gp_tasks and ->exp_tasks pointers to those of this node's, | |
609 | * if non-NULL. This might result in waiting for more tasks than | |
610 | * absolutely necessary, but this is a good performance/complexity | |
611 | * tradeoff. | |
dd5d19ba | 612 | */ |
2036d94a | 613 | if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0) |
d9a3da06 PM |
614 | retval |= RCU_OFL_TASKS_NORM_GP; |
615 | if (rcu_preempted_readers_exp(rnp)) | |
616 | retval |= RCU_OFL_TASKS_EXP_GP; | |
12f5f524 PM |
617 | lp = &rnp->blkd_tasks; |
618 | lp_root = &rnp_root->blkd_tasks; | |
619 | while (!list_empty(lp)) { | |
620 | t = list_entry(lp->next, typeof(*t), rcu_node_entry); | |
621 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | |
622 | list_del(&t->rcu_node_entry); | |
623 | t->rcu_blocked_node = rnp_root; | |
624 | list_add(&t->rcu_node_entry, lp_root); | |
625 | if (&t->rcu_node_entry == rnp->gp_tasks) | |
626 | rnp_root->gp_tasks = rnp->gp_tasks; | |
627 | if (&t->rcu_node_entry == rnp->exp_tasks) | |
628 | rnp_root->exp_tasks = rnp->exp_tasks; | |
27f4d280 PM |
629 | #ifdef CONFIG_RCU_BOOST |
630 | if (&t->rcu_node_entry == rnp->boost_tasks) | |
631 | rnp_root->boost_tasks = rnp->boost_tasks; | |
632 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
12f5f524 | 633 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ |
dd5d19ba | 634 | } |
27f4d280 PM |
635 | |
636 | #ifdef CONFIG_RCU_BOOST | |
637 | /* In case root is being boosted and leaf is not. */ | |
638 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | |
639 | if (rnp_root->boost_tasks != NULL && | |
640 | rnp_root->boost_tasks != rnp_root->gp_tasks) | |
641 | rnp_root->boost_tasks = rnp_root->gp_tasks; | |
642 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | |
643 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
644 | ||
12f5f524 PM |
645 | rnp->gp_tasks = NULL; |
646 | rnp->exp_tasks = NULL; | |
237c80c5 | 647 | return retval; |
dd5d19ba PM |
648 | } |
649 | ||
e5601400 PM |
650 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
651 | ||
33f76148 | 652 | /* |
6cc68793 | 653 | * Do CPU-offline processing for preemptible RCU. |
33f76148 | 654 | */ |
e5601400 | 655 | static void rcu_preempt_cleanup_dead_cpu(int cpu) |
33f76148 | 656 | { |
e5601400 | 657 | rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state); |
33f76148 PM |
658 | } |
659 | ||
f41d911f PM |
660 | /* |
661 | * Check for a quiescent state from the current CPU. When a task blocks, | |
662 | * the task is recorded in the corresponding CPU's rcu_node structure, | |
663 | * which is checked elsewhere. | |
664 | * | |
665 | * Caller must disable hard irqs. | |
666 | */ | |
667 | static void rcu_preempt_check_callbacks(int cpu) | |
668 | { | |
669 | struct task_struct *t = current; | |
670 | ||
671 | if (t->rcu_read_lock_nesting == 0) { | |
c3422bea | 672 | rcu_preempt_qs(cpu); |
f41d911f PM |
673 | return; |
674 | } | |
10f39bb1 PM |
675 | if (t->rcu_read_lock_nesting > 0 && |
676 | per_cpu(rcu_preempt_data, cpu).qs_pending) | |
c3422bea | 677 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; |
f41d911f PM |
678 | } |
679 | ||
680 | /* | |
6cc68793 | 681 | * Process callbacks for preemptible RCU. |
f41d911f PM |
682 | */ |
683 | static void rcu_preempt_process_callbacks(void) | |
684 | { | |
685 | __rcu_process_callbacks(&rcu_preempt_state, | |
686 | &__get_cpu_var(rcu_preempt_data)); | |
687 | } | |
688 | ||
a46e0899 PM |
689 | #ifdef CONFIG_RCU_BOOST |
690 | ||
09223371 SL |
691 | static void rcu_preempt_do_callbacks(void) |
692 | { | |
693 | rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); | |
694 | } | |
695 | ||
a46e0899 PM |
696 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
697 | ||
f41d911f | 698 | /* |
6cc68793 | 699 | * Queue a preemptible-RCU callback for invocation after a grace period. |
f41d911f PM |
700 | */ |
701 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |
702 | { | |
486e2593 | 703 | __call_rcu(head, func, &rcu_preempt_state, 0); |
f41d911f PM |
704 | } |
705 | EXPORT_SYMBOL_GPL(call_rcu); | |
706 | ||
486e2593 PM |
707 | /* |
708 | * Queue an RCU callback for lazy invocation after a grace period. | |
709 | * This will likely be later named something like "call_rcu_lazy()", | |
710 | * but this change will require some way of tagging the lazy RCU | |
711 | * callbacks in the list of pending callbacks. Until then, this | |
712 | * function may only be called from __kfree_rcu(). | |
713 | */ | |
714 | void kfree_call_rcu(struct rcu_head *head, | |
715 | void (*func)(struct rcu_head *rcu)) | |
716 | { | |
717 | __call_rcu(head, func, &rcu_preempt_state, 1); | |
718 | } | |
719 | EXPORT_SYMBOL_GPL(kfree_call_rcu); | |
720 | ||
6ebb237b PM |
721 | /** |
722 | * synchronize_rcu - wait until a grace period has elapsed. | |
723 | * | |
724 | * Control will return to the caller some time after a full grace | |
725 | * period has elapsed, in other words after all currently executing RCU | |
77d8485a PM |
726 | * read-side critical sections have completed. Note, however, that |
727 | * upon return from synchronize_rcu(), the caller might well be executing | |
728 | * concurrently with new RCU read-side critical sections that began while | |
729 | * synchronize_rcu() was waiting. RCU read-side critical sections are | |
730 | * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. | |
6ebb237b PM |
731 | */ |
732 | void synchronize_rcu(void) | |
733 | { | |
fe15d706 PM |
734 | rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) && |
735 | !lock_is_held(&rcu_lock_map) && | |
736 | !lock_is_held(&rcu_sched_lock_map), | |
737 | "Illegal synchronize_rcu() in RCU read-side critical section"); | |
6ebb237b PM |
738 | if (!rcu_scheduler_active) |
739 | return; | |
2c42818e | 740 | wait_rcu_gp(call_rcu); |
6ebb237b PM |
741 | } |
742 | EXPORT_SYMBOL_GPL(synchronize_rcu); | |
743 | ||
d9a3da06 PM |
744 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); |
745 | static long sync_rcu_preempt_exp_count; | |
746 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | |
747 | ||
748 | /* | |
749 | * Return non-zero if there are any tasks in RCU read-side critical | |
750 | * sections blocking the current preemptible-RCU expedited grace period. | |
751 | * If there is no preemptible-RCU expedited grace period currently in | |
752 | * progress, returns zero unconditionally. | |
753 | */ | |
754 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) | |
755 | { | |
12f5f524 | 756 | return rnp->exp_tasks != NULL; |
d9a3da06 PM |
757 | } |
758 | ||
759 | /* | |
760 | * Return non-zero if there is no RCU expedited grace period in progress | |
761 | * for the specified rcu_node structure, in other words, if all CPUs and | |
762 | * tasks covered by the specified rcu_node structure have done their bit | |
763 | * for the current expedited grace period. Works only for preemptible | |
764 | * RCU -- other RCU implementations use other means. | |
765 | * | |
766 | * Caller must hold sync_rcu_preempt_exp_mutex. | |
767 | */ | |
768 | static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) | |
769 | { | |
770 | return !rcu_preempted_readers_exp(rnp) && | |
771 | ACCESS_ONCE(rnp->expmask) == 0; | |
772 | } | |
773 | ||
774 | /* | |
775 | * Report the exit from RCU read-side critical section for the last task | |
776 | * that queued itself during or before the current expedited preemptible-RCU | |
777 | * grace period. This event is reported either to the rcu_node structure on | |
778 | * which the task was queued or to one of that rcu_node structure's ancestors, | |
779 | * recursively up the tree. (Calm down, calm down, we do the recursion | |
780 | * iteratively!) | |
781 | * | |
b40d293e TG |
782 | * Most callers will set the "wake" flag, but the task initiating the |
783 | * expedited grace period need not wake itself. | |
784 | * | |
d9a3da06 PM |
785 | * Caller must hold sync_rcu_preempt_exp_mutex. |
786 | */ | |
b40d293e TG |
787 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, |
788 | bool wake) | |
d9a3da06 PM |
789 | { |
790 | unsigned long flags; | |
791 | unsigned long mask; | |
792 | ||
1304afb2 | 793 | raw_spin_lock_irqsave(&rnp->lock, flags); |
d9a3da06 | 794 | for (;;) { |
131906b0 PM |
795 | if (!sync_rcu_preempt_exp_done(rnp)) { |
796 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
d9a3da06 | 797 | break; |
131906b0 | 798 | } |
d9a3da06 | 799 | if (rnp->parent == NULL) { |
131906b0 | 800 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
b40d293e TG |
801 | if (wake) |
802 | wake_up(&sync_rcu_preempt_exp_wq); | |
d9a3da06 PM |
803 | break; |
804 | } | |
805 | mask = rnp->grpmask; | |
1304afb2 | 806 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
d9a3da06 | 807 | rnp = rnp->parent; |
1304afb2 | 808 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
d9a3da06 PM |
809 | rnp->expmask &= ~mask; |
810 | } | |
d9a3da06 PM |
811 | } |
812 | ||
813 | /* | |
814 | * Snapshot the tasks blocking the newly started preemptible-RCU expedited | |
815 | * grace period for the specified rcu_node structure. If there are no such | |
816 | * tasks, report it up the rcu_node hierarchy. | |
817 | * | |
818 | * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock. | |
819 | */ | |
820 | static void | |
821 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | |
822 | { | |
1217ed1b | 823 | unsigned long flags; |
12f5f524 | 824 | int must_wait = 0; |
d9a3da06 | 825 | |
1217ed1b PM |
826 | raw_spin_lock_irqsave(&rnp->lock, flags); |
827 | if (list_empty(&rnp->blkd_tasks)) | |
828 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
829 | else { | |
12f5f524 | 830 | rnp->exp_tasks = rnp->blkd_tasks.next; |
1217ed1b | 831 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ |
12f5f524 PM |
832 | must_wait = 1; |
833 | } | |
d9a3da06 | 834 | if (!must_wait) |
b40d293e | 835 | rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ |
d9a3da06 PM |
836 | } |
837 | ||
236fefaf PM |
838 | /** |
839 | * synchronize_rcu_expedited - Brute-force RCU grace period | |
840 | * | |
841 | * Wait for an RCU-preempt grace period, but expedite it. The basic | |
842 | * idea is to invoke synchronize_sched_expedited() to push all the tasks to | |
843 | * the ->blkd_tasks lists and wait for these lists to drain. This consumes | |
844 | * significant time on all CPUs and is unfriendly to real-time workloads, | |
845 | * and is thus not recommended for any sort of common-case code. | |
846 | * In fact, if you are using synchronize_rcu_expedited() in a loop, | |
847 | * please restructure your code to batch your updates, and then use a | |
848 | * single synchronize_rcu() instead. | |
849 | * | |
850 | * Note that it is illegal to call this function while holding any lock | |
851 | * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal | |
852 | * to call this function from a CPU-hotplug notifier. Failing to observe | |
853 | * these restrictions will result in deadlock. | |
019129d5 PM |
854 | */ |
855 | void synchronize_rcu_expedited(void) | |
856 | { | |
d9a3da06 PM |
857 | unsigned long flags; |
858 | struct rcu_node *rnp; | |
859 | struct rcu_state *rsp = &rcu_preempt_state; | |
860 | long snap; | |
861 | int trycount = 0; | |
862 | ||
863 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ | |
864 | snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; | |
865 | smp_mb(); /* Above access cannot bleed into critical section. */ | |
866 | ||
867 | /* | |
868 | * Acquire lock, falling back to synchronize_rcu() if too many | |
869 | * lock-acquisition failures. Of course, if someone does the | |
870 | * expedited grace period for us, just leave. | |
871 | */ | |
872 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { | |
873 | if (trycount++ < 10) | |
874 | udelay(trycount * num_online_cpus()); | |
875 | else { | |
876 | synchronize_rcu(); | |
877 | return; | |
878 | } | |
879 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | |
880 | goto mb_ret; /* Others did our work for us. */ | |
881 | } | |
882 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | |
883 | goto unlock_mb_ret; /* Others did our work for us. */ | |
884 | ||
12f5f524 | 885 | /* force all RCU readers onto ->blkd_tasks lists. */ |
d9a3da06 PM |
886 | synchronize_sched_expedited(); |
887 | ||
1304afb2 | 888 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
d9a3da06 PM |
889 | |
890 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ | |
891 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { | |
1304afb2 | 892 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
d9a3da06 | 893 | rnp->expmask = rnp->qsmaskinit; |
1304afb2 | 894 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
d9a3da06 PM |
895 | } |
896 | ||
12f5f524 | 897 | /* Snapshot current state of ->blkd_tasks lists. */ |
d9a3da06 PM |
898 | rcu_for_each_leaf_node(rsp, rnp) |
899 | sync_rcu_preempt_exp_init(rsp, rnp); | |
900 | if (NUM_RCU_NODES > 1) | |
901 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); | |
902 | ||
1304afb2 | 903 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); |
d9a3da06 | 904 | |
12f5f524 | 905 | /* Wait for snapshotted ->blkd_tasks lists to drain. */ |
d9a3da06 PM |
906 | rnp = rcu_get_root(rsp); |
907 | wait_event(sync_rcu_preempt_exp_wq, | |
908 | sync_rcu_preempt_exp_done(rnp)); | |
909 | ||
910 | /* Clean up and exit. */ | |
911 | smp_mb(); /* ensure expedited GP seen before counter increment. */ | |
912 | ACCESS_ONCE(sync_rcu_preempt_exp_count)++; | |
913 | unlock_mb_ret: | |
914 | mutex_unlock(&sync_rcu_preempt_exp_mutex); | |
915 | mb_ret: | |
916 | smp_mb(); /* ensure subsequent action seen after grace period. */ | |
019129d5 PM |
917 | } |
918 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |
919 | ||
f41d911f | 920 | /* |
6cc68793 | 921 | * Check to see if there is any immediate preemptible-RCU-related work |
f41d911f PM |
922 | * to be done. |
923 | */ | |
924 | static int rcu_preempt_pending(int cpu) | |
925 | { | |
926 | return __rcu_pending(&rcu_preempt_state, | |
927 | &per_cpu(rcu_preempt_data, cpu)); | |
928 | } | |
929 | ||
930 | /* | |
30fbcc90 | 931 | * Does preemptible RCU have callbacks on this CPU? |
f41d911f | 932 | */ |
30fbcc90 | 933 | static int rcu_preempt_cpu_has_callbacks(int cpu) |
f41d911f PM |
934 | { |
935 | return !!per_cpu(rcu_preempt_data, cpu).nxtlist; | |
936 | } | |
937 | ||
e74f4c45 PM |
938 | /** |
939 | * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. | |
940 | */ | |
941 | void rcu_barrier(void) | |
942 | { | |
943 | _rcu_barrier(&rcu_preempt_state, call_rcu); | |
944 | } | |
945 | EXPORT_SYMBOL_GPL(rcu_barrier); | |
946 | ||
f41d911f | 947 | /* |
6cc68793 | 948 | * Initialize preemptible RCU's per-CPU data. |
f41d911f PM |
949 | */ |
950 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |
951 | { | |
952 | rcu_init_percpu_data(cpu, &rcu_preempt_state, 1); | |
953 | } | |
954 | ||
e74f4c45 | 955 | /* |
e5601400 PM |
956 | * Move preemptible RCU's callbacks from the dying CPU to another online CPU | |
957 | * and record a quiescent state. | |
e74f4c45 | 958 | */ |
e5601400 | 959 | static void rcu_preempt_cleanup_dying_cpu(void) |
e74f4c45 | 960 | { |
e5601400 | 961 | rcu_cleanup_dying_cpu(&rcu_preempt_state); |
e74f4c45 PM |
962 | } |
963 | ||
1eba8f84 | 964 | /* |
6cc68793 | 965 | * Initialize preemptible RCU's state structures. |
1eba8f84 PM |
966 | */ |
967 | static void __init __rcu_init_preempt(void) | |
968 | { | |
394f99a9 | 969 | rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); |
1eba8f84 PM |
970 | } |
971 | ||
f41d911f PM |
972 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
973 | ||
27f4d280 PM |
974 | static struct rcu_state *rcu_state = &rcu_sched_state; |
975 | ||
f41d911f PM |
976 | /* |
977 | * Tell them what RCU they are running. | |
978 | */ | |
0e0fc1c2 | 979 | static void __init rcu_bootup_announce(void) |
f41d911f PM |
980 | { |
981 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | |
26845c28 | 982 | rcu_bootup_announce_oddness(); |
f41d911f PM |
983 | } |
984 | ||
985 | /* | |
986 | * Return the number of RCU batches processed thus far for debug & stats. | |
987 | */ | |
988 | long rcu_batches_completed(void) | |
989 | { | |
990 | return rcu_batches_completed_sched(); | |
991 | } | |
992 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | |
993 | ||
bf66f18e PM |
994 | /* |
995 | * Force a quiescent state for RCU, which, because there is no preemptible | |
996 | * RCU, becomes the same as rcu-sched. | |
997 | */ | |
998 | void rcu_force_quiescent_state(void) | |
999 | { | |
1000 | rcu_sched_force_quiescent_state(); | |
1001 | } | |
1002 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |
1003 | ||
fc2219d4 | 1004 | /* |
6cc68793 | 1005 | * Because preemptible RCU does not exist, there are never any preempted |
fc2219d4 PM |
1006 | * RCU readers. |
1007 | */ | |
27f4d280 | 1008 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
fc2219d4 PM |
1009 | { |
1010 | return 0; | |
1011 | } | |
1012 | ||
b668c9cf PM |
1013 | #ifdef CONFIG_HOTPLUG_CPU |
1014 | ||
1015 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | |
d3f6bad3 | 1016 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) |
b668c9cf | 1017 | { |
1304afb2 | 1018 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
b668c9cf PM |
1019 | } |
1020 | ||
1021 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1022 | ||
1ed509a2 | 1023 | /* |
6cc68793 | 1024 | * Because preemptible RCU does not exist, we never have to check for |
1ed509a2 PM |
1025 | * tasks blocked within RCU read-side critical sections. |
1026 | */ | |
1027 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
1028 | { | |
1029 | } | |
1030 | ||
f41d911f | 1031 | /* |
6cc68793 | 1032 | * Because preemptible RCU does not exist, we never have to check for |
f41d911f PM |
1033 | * tasks blocked within RCU read-side critical sections. |
1034 | */ | |
9bc8b558 | 1035 | static int rcu_print_task_stall(struct rcu_node *rnp) |
f41d911f | 1036 | { |
9bc8b558 | 1037 | return 0; |
f41d911f PM |
1038 | } |
1039 | ||
53d84e00 PM |
1040 | /* |
1041 | * Because preemptible RCU does not exist, there is no need to suppress | |
1042 | * its CPU stall warnings. | |
1043 | */ | |
1044 | static void rcu_preempt_stall_reset(void) | |
1045 | { | |
1046 | } | |
1047 | ||
b0e165c0 | 1048 | /* |
6cc68793 | 1049 | * Because there is no preemptible RCU, there can be no readers blocked, |
49e29126 PM |
1050 | * so there is no need to check for blocked tasks; check only for | |
1051 | * bogus qsmask values. | |
b0e165c0 PM |
1052 | */ |
1053 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |
1054 | { | |
49e29126 | 1055 | WARN_ON_ONCE(rnp->qsmask); |
b0e165c0 PM |
1056 | } |
1057 | ||
33f76148 PM |
1058 | #ifdef CONFIG_HOTPLUG_CPU |
1059 | ||
dd5d19ba | 1060 | /* |
6cc68793 | 1061 | * Because preemptible RCU does not exist, it never needs to migrate |
237c80c5 PM |
1062 | * tasks that were blocked within RCU read-side critical sections, and |
1063 | * such non-existent tasks cannot possibly have been blocking the current | |
1064 | * grace period. | |
dd5d19ba | 1065 | */ |
237c80c5 PM |
1066 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
1067 | struct rcu_node *rnp, | |
1068 | struct rcu_data *rdp) | |
dd5d19ba | 1069 | { |
237c80c5 | 1070 | return 0; |
dd5d19ba PM |
1071 | } |
1072 | ||
e5601400 PM |
1073 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
1074 | ||
33f76148 | 1075 | /* |
6cc68793 | 1076 | * Because preemptible RCU does not exist, it never needs CPU-offline |
33f76148 PM |
1077 | * processing. |
1078 | */ | |
e5601400 | 1079 | static void rcu_preempt_cleanup_dead_cpu(int cpu) |
33f76148 PM |
1080 | { |
1081 | } | |
1082 | ||
f41d911f | 1083 | /* |
6cc68793 | 1084 | * Because preemptible RCU does not exist, it never has any callbacks |
f41d911f PM |
1085 | * to check. |
1086 | */ | |
1eba8f84 | 1087 | static void rcu_preempt_check_callbacks(int cpu) |
f41d911f PM |
1088 | { |
1089 | } | |
1090 | ||
1091 | /* | |
6cc68793 | 1092 | * Because preemptible RCU does not exist, it never has any callbacks |
f41d911f PM |
1093 | * to process. |
1094 | */ | |
1eba8f84 | 1095 | static void rcu_preempt_process_callbacks(void) |
f41d911f PM |
1096 | { |
1097 | } | |
1098 | ||
486e2593 PM |
1099 | /* |
1100 | * Queue an RCU callback for lazy invocation after a grace period. | |
1101 | * This will likely be later named something like "call_rcu_lazy()", | |
1102 | * but this change will require some way of tagging the lazy RCU | |
1103 | * callbacks in the list of pending callbacks. Until then, this | |
1104 | * function may only be called from __kfree_rcu(). | |
1105 | * | |
1106 | * Because there is no preemptible RCU, we use RCU-sched instead. | |
1107 | */ | |
1108 | void kfree_call_rcu(struct rcu_head *head, | |
1109 | void (*func)(struct rcu_head *rcu)) | |
1110 | { | |
1111 | __call_rcu(head, func, &rcu_sched_state, 1); | |
1112 | } | |
1113 | EXPORT_SYMBOL_GPL(kfree_call_rcu); | |
1114 | ||
019129d5 PM |
1115 | /* |
1116 | * Wait for an rcu-preempt grace period, but make it happen quickly. | |
6cc68793 | 1117 | * But because preemptible RCU does not exist, map to rcu-sched. |
019129d5 PM |
1118 | */ |
1119 | void synchronize_rcu_expedited(void) | |
1120 | { | |
1121 | synchronize_sched_expedited(); | |
1122 | } | |
1123 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |
1124 | ||
d9a3da06 PM |
1125 | #ifdef CONFIG_HOTPLUG_CPU |
1126 | ||
1127 | /* | |
6cc68793 | 1128 | * Because preemptible RCU does not exist, there is never any need to |
d9a3da06 PM |
1129 | * report on tasks preempted in RCU read-side critical sections during |
1130 | * expedited RCU grace periods. | |
1131 | */ | |
b40d293e TG |
1132 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, |
1133 | bool wake) | |
d9a3da06 | 1134 | { |
d9a3da06 PM |
1135 | } |
1136 | ||
1137 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1138 | ||
f41d911f | 1139 | /* |
6cc68793 | 1140 | * Because preemptible RCU does not exist, it never has any work to do. |
f41d911f PM |
1141 | */ |
1142 | static int rcu_preempt_pending(int cpu) | |
1143 | { | |
1144 | return 0; | |
1145 | } | |
1146 | ||
1147 | /* | |
30fbcc90 | 1148 | * Because preemptible RCU does not exist, it never has callbacks. |
f41d911f | 1149 | */ |
30fbcc90 | 1150 | static int rcu_preempt_cpu_has_callbacks(int cpu) |
f41d911f PM |
1151 | { |
1152 | return 0; | |
1153 | } | |
1154 | ||
e74f4c45 | 1155 | /* |
6cc68793 | 1156 | * Because preemptible RCU does not exist, rcu_barrier() is just |
e74f4c45 PM |
1157 | * another name for rcu_barrier_sched(). |
1158 | */ | |
1159 | void rcu_barrier(void) | |
1160 | { | |
1161 | rcu_barrier_sched(); | |
1162 | } | |
1163 | EXPORT_SYMBOL_GPL(rcu_barrier); | |
1164 | ||
f41d911f | 1165 | /* |
6cc68793 | 1166 | * Because preemptible RCU does not exist, there is no per-CPU |
f41d911f PM |
1167 | * data to initialize. |
1168 | */ | |
1169 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |
1170 | { | |
1171 | } | |
1172 | ||
e74f4c45 | 1173 | /* |
e5601400 | 1174 | * Because there is no preemptible RCU, there is no cleanup to do. |
e74f4c45 | 1175 | */ |
e5601400 | 1176 | static void rcu_preempt_cleanup_dying_cpu(void) |
e74f4c45 PM |
1177 | { |
1178 | } | |
1179 | ||
1eba8f84 | 1180 | /* |
6cc68793 | 1181 | * Because preemptible RCU does not exist, it need not be initialized. |
1eba8f84 PM |
1182 | */ |
1183 | static void __init __rcu_init_preempt(void) | |
1184 | { | |
1185 | } | |
1186 | ||
f41d911f | 1187 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
8bd93a2c | 1188 | |
27f4d280 PM |
1189 | #ifdef CONFIG_RCU_BOOST |
1190 | ||
1191 | #include "rtmutex_common.h" | |
1192 | ||
0ea1f2eb PM |
1193 | #ifdef CONFIG_RCU_TRACE |
1194 | ||
1195 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |
1196 | { | |
1197 | if (list_empty(&rnp->blkd_tasks)) | |
1198 | rnp->n_balk_blkd_tasks++; | |
1199 | else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) | |
1200 | rnp->n_balk_exp_gp_tasks++; | |
1201 | else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) | |
1202 | rnp->n_balk_boost_tasks++; | |
1203 | else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) | |
1204 | rnp->n_balk_notblocked++; | |
1205 | else if (rnp->gp_tasks != NULL && | |
a9f4793d | 1206 | ULONG_CMP_LT(jiffies, rnp->boost_time)) |
0ea1f2eb PM |
1207 | rnp->n_balk_notyet++; |
1208 | else | |
1209 | rnp->n_balk_nos++; | |
1210 | } | |
1211 | ||
1212 | #else /* #ifdef CONFIG_RCU_TRACE */ | |
1213 | ||
1214 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |
1215 | { | |
1216 | } | |
1217 | ||
1218 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | |
1219 | ||
27f4d280 PM |
1220 | /* |
1221 | * Carry out RCU priority boosting on the task indicated by ->exp_tasks | |
1222 | * or ->boost_tasks, advancing the pointer to the next task in the | |
1223 | * ->blkd_tasks list. | |
1224 | * | |
1225 | * Note that irqs must be enabled: boosting the task can block. | |
1226 | * Returns 1 if there are more tasks needing to be boosted. | |
1227 | */ | |
1228 | static int rcu_boost(struct rcu_node *rnp) | |
1229 | { | |
1230 | unsigned long flags; | |
1231 | struct rt_mutex mtx; | |
1232 | struct task_struct *t; | |
1233 | struct list_head *tb; | |
1234 | ||
1235 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) | |
1236 | return 0; /* Nothing left to boost. */ | |
1237 | ||
1238 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1239 | ||
1240 | /* | |
1241 | * Recheck under the lock: all tasks in need of boosting | |
1242 | * might exit their RCU read-side critical sections on their own. | |
1243 | */ | |
1244 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { | |
1245 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1246 | return 0; | |
1247 | } | |
1248 | ||
1249 | /* | |
1250 | * Preferentially boost tasks blocking expedited grace periods. | |
1251 | * This cannot starve the normal grace periods because a second | |
1252 | * expedited grace period must boost all blocked tasks, including | |
1253 | * those blocking the pre-existing normal grace period. | |
1254 | */ | |
0ea1f2eb | 1255 | if (rnp->exp_tasks != NULL) { |
27f4d280 | 1256 | tb = rnp->exp_tasks; |
0ea1f2eb PM |
1257 | rnp->n_exp_boosts++; |
1258 | } else { | |
27f4d280 | 1259 | tb = rnp->boost_tasks; |
0ea1f2eb PM |
1260 | rnp->n_normal_boosts++; |
1261 | } | |
1262 | rnp->n_tasks_boosted++; | |
27f4d280 PM |
1263 | |
1264 | /* | |
1265 | * We boost task t by manufacturing an rt_mutex that appears to | |
1266 | * be held by task t. We leave a pointer to that rt_mutex where | |
1267 | * task t can find it, and task t will release the mutex when it | |
1268 | * exits its outermost RCU read-side critical section. Then | |
1269 | * simply acquiring this artificial rt_mutex will boost task | |
1270 | * t's priority. (Thanks to tglx for suggesting this approach!) | |
1271 | * | |
1272 | * Note that task t must acquire rnp->lock to remove itself from | |
1273 | * the ->blkd_tasks list, which it will do from exit() if from | |
1274 | * nowhere else. We therefore are guaranteed that task t will | |
1275 | * stay around at least until we drop rnp->lock. Note that | |
1276 | * rnp->lock also resolves races between our priority boosting | |
1277 | * and task t's exiting its outermost RCU read-side critical | |
1278 | * section. | |
1279 | */ | |
1280 | t = container_of(tb, struct task_struct, rcu_node_entry); | |
1281 | rt_mutex_init_proxy_locked(&mtx, t); | |
1282 | t->rcu_boost_mutex = &mtx; | |
27f4d280 PM |
1283 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1284 | rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ | |
1285 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ | |
1286 | ||
4f89b336 PM |
1287 | return ACCESS_ONCE(rnp->exp_tasks) != NULL || |
1288 | ACCESS_ONCE(rnp->boost_tasks) != NULL; | |
27f4d280 PM |
1289 | } |
1290 | ||
1291 | /* | |
1292 | * Timer handler to initiate waking up of boost kthreads that | |
1293 | * have yielded the CPU due to excessive numbers of tasks to | |
1294 | * boost. We wake up the per-rcu_node kthread, which in turn | |
1295 | * will wake up the booster kthread. | |
1296 | */ | |
1297 | static void rcu_boost_kthread_timer(unsigned long arg) | |
1298 | { | |
1217ed1b | 1299 | invoke_rcu_node_kthread((struct rcu_node *)arg); |
27f4d280 PM |
1300 | } |
1301 | ||
1302 | /* | |
1303 | * Priority-boosting kthread. One per leaf rcu_node and one for the | |
1304 | * root rcu_node. | |
1305 | */ | |
1306 | static int rcu_boost_kthread(void *arg) | |
1307 | { | |
1308 | struct rcu_node *rnp = (struct rcu_node *)arg; | |
1309 | int spincnt = 0; | |
1310 | int more2boost; | |
1311 | ||
385680a9 | 1312 | trace_rcu_utilization("Start boost kthread@init"); |
27f4d280 | 1313 | for (;;) { |
d71df90e | 1314 | rnp->boost_kthread_status = RCU_KTHREAD_WAITING; |
385680a9 | 1315 | trace_rcu_utilization("End boost kthread@rcu_wait"); |
08bca60a | 1316 | rcu_wait(rnp->boost_tasks || rnp->exp_tasks); |
385680a9 | 1317 | trace_rcu_utilization("Start boost kthread@rcu_wait"); |
d71df90e | 1318 | rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; |
27f4d280 PM |
1319 | more2boost = rcu_boost(rnp); |
1320 | if (more2boost) | |
1321 | spincnt++; | |
1322 | else | |
1323 | spincnt = 0; | |
1324 | if (spincnt > 10) { | |
385680a9 | 1325 | trace_rcu_utilization("End boost kthread@rcu_yield"); |
27f4d280 | 1326 | rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp); |
385680a9 | 1327 | trace_rcu_utilization("Start boost kthread@rcu_yield"); |
27f4d280 PM |
1328 | spincnt = 0; |
1329 | } | |
1330 | } | |
1217ed1b | 1331 | /* NOTREACHED */ |
385680a9 | 1332 | trace_rcu_utilization("End boost kthread@notreached"); |
27f4d280 PM |
1333 | return 0; |
1334 | } | |
1335 | ||
1336 | /* | |
1337 | * Check to see if it is time to start boosting RCU readers that are | |
1338 | * blocking the current grace period, and, if so, tell the per-rcu_node | |
1339 | * kthread to start boosting them. If there is an expedited grace | |
1340 | * period in progress, it is always time to boost. | |
1341 | * | |
1217ed1b PM |
1342 | * The caller must hold rnp->lock, which this function releases, |
1343 | * but irqs remain disabled. The ->boost_kthread_task is immortal, | |
1344 | * so we don't need to worry about it going away. | |
27f4d280 | 1345 | */ |
1217ed1b | 1346 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
27f4d280 PM |
1347 | { |
1348 | struct task_struct *t; | |
1349 | ||
0ea1f2eb PM |
1350 | if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { |
1351 | rnp->n_balk_exp_gp_tasks++; | |
1217ed1b | 1352 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 | 1353 | return; |
0ea1f2eb | 1354 | } |
27f4d280 PM |
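/* Boost if readers are blocking an expedited grace period, or if tasks are blocking the current grace period, boosting has not yet begun, all of this rcu_node's CPUs have passed through quiescent states, and the boost delay has elapsed. */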
1355 | if (rnp->exp_tasks != NULL || |
1356 | (rnp->gp_tasks != NULL && | |
1357 | rnp->boost_tasks == NULL && | |
1358 | rnp->qsmask == 0 && | |
1359 | ULONG_CMP_GE(jiffies, rnp->boost_time))) { | |
1360 | if (rnp->exp_tasks == NULL) | |
1361 | rnp->boost_tasks = rnp->gp_tasks; | |
1217ed1b | 1362 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 PM |
1363 | t = rnp->boost_kthread_task; |
1364 | if (t != NULL) | |
1365 | wake_up_process(t); | |
1217ed1b | 1366 | } else { |
0ea1f2eb | 1367 | rcu_initiate_boost_trace(rnp); |
1217ed1b PM |
1368 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1369 | } | |
27f4d280 PM |
1370 | } |
1371 | ||
a46e0899 PM |
1372 | /* |
1373 | * Wake up the per-CPU kthread to invoke RCU callbacks. | |
1374 | */ | |
1375 | static void invoke_rcu_callbacks_kthread(void) | |
1376 | { | |
1377 | unsigned long flags; | |
1378 | ||
1379 | local_irq_save(flags); | |
1380 | __this_cpu_write(rcu_cpu_has_work, 1); | |
1eb52121 SL |
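/* Wake the per-CPU kthread unless it does not yet exist or we are already running within it. */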
1381 | if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && |
1382 | current != __this_cpu_read(rcu_cpu_kthread_task)) | |
1383 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | |
a46e0899 PM |
1384 | local_irq_restore(flags); |
1385 | } | |
1386 | ||
dff1672d PM |
1387 | /* |
1388 | * Is the current CPU running the RCU-callbacks kthread? | |
1389 | * Caller must have preemption disabled. | |
1390 | */ | |
1391 | static bool rcu_is_callbacks_kthread(void) | |
1392 | { | |
1393 | return __get_cpu_var(rcu_cpu_kthread_task) == current; | |
1394 | } | |
1395 | ||
0f962a5e PM |
1396 | /* |
1397 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | |
1398 | * held, so no one should be messing with the existence of the boost | |
1399 | * kthread. | |
1400 | */ | |
27f4d280 PM |
1401 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, |
1402 | cpumask_var_t cm) | |
1403 | { | |
27f4d280 PM |
1404 | struct task_struct *t; |
1405 | ||
27f4d280 PM |
1406 | t = rnp->boost_kthread_task; |
1407 | if (t != NULL) | |
1408 | set_cpus_allowed_ptr(rnp->boost_kthread_task, cm); | |
27f4d280 PM |
1409 | } |
1410 | ||
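/* Convert the boost delay from milliseconds (CONFIG_RCU_BOOST_DELAY) to jiffies, rounding up. */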
1411 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | |
1412 | ||
1413 | /* | |
1414 | * Do priority-boost accounting for the start of a new grace period. | |
1415 | */ | |
1416 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |
1417 | { | |
1418 | rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; | |
1419 | } | |
1420 | ||
27f4d280 PM |
1421 | /* |
1422 | * Create an RCU-boost kthread for the specified node if one does not | |
1423 | * already exist. We only create this kthread for preemptible RCU. | |
1424 | * Returns zero if all is well, a negated errno otherwise. | |
1425 | */ | |
1426 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |
1427 | struct rcu_node *rnp, | |
1428 | int rnp_index) | |
1429 | { | |
1430 | unsigned long flags; | |
1431 | struct sched_param sp; | |
1432 | struct task_struct *t; | |
1433 | ||
1434 | if (&rcu_preempt_state != rsp) | |
1435 | return 0; | |
a46e0899 | 1436 | rsp->boost = 1; |
27f4d280 PM |
1437 | if (rnp->boost_kthread_task != NULL) |
1438 | return 0; | |
1439 | t = kthread_create(rcu_boost_kthread, (void *)rnp, | |
5b61b0ba | 1440 | "rcub/%d", rnp_index); |
27f4d280 PM |
1441 | if (IS_ERR(t)) |
1442 | return PTR_ERR(t); | |
1443 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1444 | rnp->boost_kthread_task = t; | |
1445 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
5b61b0ba | 1446 | sp.sched_priority = RCU_BOOST_PRIO; |
27f4d280 | 1447 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
9a432736 | 1448 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ |
27f4d280 PM |
1449 | return 0; |
1450 | } | |
1451 | ||
f8b7fc6b PM |
1452 | #ifdef CONFIG_HOTPLUG_CPU |
1453 | ||
1454 | /* | |
1455 | * Stop RCU's per-CPU kthread when its CPU goes offline. | |
1456 | */ | |
1457 | static void rcu_stop_cpu_kthread(int cpu) | |
1458 | { | |
1459 | struct task_struct *t; | |
1460 | ||
1461 | /* Stop the CPU's kthread. */ | |
1462 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1463 | if (t != NULL) { | |
1464 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | |
1465 | kthread_stop(t); | |
1466 | } | |
1467 | } | |
1468 | ||
1469 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1470 | ||
1471 | static void rcu_kthread_do_work(void) | |
1472 | { | |
1473 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | |
1474 | rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | |
1475 | rcu_preempt_do_callbacks(); | |
1476 | } | |
1477 | ||
1478 | /* | |
1479 | * Wake up the specified per-rcu_node-structure kthread. | |
1480 | * Because the per-rcu_node kthreads are immortal, we don't need | |
1481 | * to do anything to keep them alive. | |
1482 | */ | |
1483 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | |
1484 | { | |
1485 | struct task_struct *t; | |
1486 | ||
1487 | t = rnp->node_kthread_task; | |
1488 | if (t != NULL) | |
1489 | wake_up_process(t); | |
1490 | } | |
1491 | ||
1492 | /* | |
1493 | * Set the specified CPU's kthread to run RT or not, as specified by | |
1494 | * the to_rt argument. The CPU-hotplug locks are held, so the task | |
1495 | * is not going away. | |
1496 | */ | |
1497 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | |
1498 | { | |
1499 | int policy; | |
1500 | struct sched_param sp; | |
1501 | struct task_struct *t; | |
1502 | ||
1503 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1504 | if (t == NULL) | |
1505 | return; | |
1506 | if (to_rt) { | |
1507 | policy = SCHED_FIFO; | |
1508 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1509 | } else { | |
1510 | policy = SCHED_NORMAL; | |
1511 | sp.sched_priority = 0; | |
1512 | } | |
1513 | sched_setscheduler_nocheck(t, policy, &sp); | |
1514 | } | |
1515 | ||
1516 | /* | |
1517 | * Timer handler to initiate the waking up of per-CPU kthreads that | |
1518 | * have yielded the CPU due to excess numbers of RCU callbacks. | |
1519 | * We wake up the per-rcu_node kthread, which in turn will wake up | |
1520 | * the per-CPU kthread. | |
1521 | */ | |
1522 | static void rcu_cpu_kthread_timer(unsigned long arg) | |
1523 | { | |
1524 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | |
1525 | struct rcu_node *rnp = rdp->mynode; | |
1526 | ||
1527 | atomic_or(rdp->grpmask, &rnp->wakemask); | |
1528 | invoke_rcu_node_kthread(rnp); | |
1529 | } | |
1530 | ||
1531 | /* | |
1532 | * Drop to non-real-time priority and yield, but only after posting a | |
1533 | * timer that will cause us to regain our real-time priority if we | |
1534 | * remain preempted. Either way, we restore our real-time priority | |
1535 | * before returning. | |
1536 | */ | |
1537 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | |
1538 | { | |
1539 | struct sched_param sp; | |
1540 | struct timer_list yield_timer; | |
5b61b0ba | 1541 | int prio = current->rt_priority; |
f8b7fc6b PM |
1542 | |
1543 | setup_timer_on_stack(&yield_timer, f, arg); | |
1544 | mod_timer(&yield_timer, jiffies + 2); | |
1545 | sp.sched_priority = 0; | |
1546 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | |
1547 | set_user_nice(current, 19); | |
1548 | schedule(); | |
5b61b0ba MG |
1549 | set_user_nice(current, 0); |
1550 | sp.sched_priority = prio; | |
f8b7fc6b PM |
1551 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); |
1552 | del_timer(&yield_timer); | |
1553 | } | |
1554 | ||
1555 | /* | |
1556 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | |
1557 | * This can happen while the corresponding CPU is either coming online | |
1558 | * or going offline. We cannot wait until the CPU is fully online | |
1559 | * before starting the kthread, because the various notifier functions | |
1560 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | |
1561 | * the corresponding CPU is online. | |
1562 | * | |
1563 | * Return 1 if the kthread needs to stop, 0 otherwise. | |
1564 | * | |
1565 | * Caller must disable bh. This function can momentarily enable it. | |
1566 | */ | |
1567 | static int rcu_cpu_kthread_should_stop(int cpu) | |
1568 | { | |
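/* Spin (sleeping one jiffy at a time) until this kthread is running on its own, fully online CPU with the correct affinity, or until it is asked to stop. */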
1569 | while (cpu_is_offline(cpu) || | |
1570 | !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) || |
1571 | smp_processor_id() != cpu) { | |
1572 | if (kthread_should_stop()) | |
1573 | return 1; | |
1574 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | |
1575 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | |
1576 | local_bh_enable(); | |
1577 | schedule_timeout_uninterruptible(1); | |
1578 | if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu))) |
1579 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | |
1580 | local_bh_disable(); | |
1581 | } | |
1582 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | |
1583 | return 0; | |
1584 | } | |
1585 | ||
1586 | /* | |
1587 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | |
e0f23060 PM |
1588 | * RCU softirq used in flavors and configurations of RCU that do not |
1589 | * support RCU priority boosting. | |
f8b7fc6b PM |
1590 | */ |
1591 | static int rcu_cpu_kthread(void *arg) | |
1592 | { | |
1593 | int cpu = (int)(long)arg; | |
1594 | unsigned long flags; | |
1595 | int spincnt = 0; | |
1596 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | |
1597 | char work; | |
1598 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | |
1599 | ||
385680a9 | 1600 | trace_rcu_utilization("Start CPU kthread@init"); |
f8b7fc6b PM |
1601 | for (;;) { |
1602 | *statusp = RCU_KTHREAD_WAITING; | |
385680a9 | 1603 | trace_rcu_utilization("End CPU kthread@rcu_wait"); |
f8b7fc6b | 1604 | rcu_wait(*workp != 0 || kthread_should_stop()); |
385680a9 | 1605 | trace_rcu_utilization("Start CPU kthread@rcu_wait"); |
f8b7fc6b PM |
1606 | local_bh_disable(); |
1607 | if (rcu_cpu_kthread_should_stop(cpu)) { | |
1608 | local_bh_enable(); | |
1609 | break; | |
1610 | } | |
1611 | *statusp = RCU_KTHREAD_RUNNING; | |
1612 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | |
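/* Fetch and clear the per-CPU work flag with irqs disabled so that an interrupt setting the flag cannot slip in between the fetch and the clear. */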
1613 | local_irq_save(flags); | |
1614 | work = *workp; | |
1615 | *workp = 0; | |
1616 | local_irq_restore(flags); | |
1617 | if (work) | |
1618 | rcu_kthread_do_work(); | |
1619 | local_bh_enable(); | |
1620 | if (*workp != 0) | |
1621 | spincnt++; | |
1622 | else | |
1623 | spincnt = 0; | |
1624 | if (spincnt > 10) { | |
1625 | *statusp = RCU_KTHREAD_YIELDING; | |
385680a9 | 1626 | trace_rcu_utilization("End CPU kthread@rcu_yield"); |
f8b7fc6b | 1627 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); |
385680a9 | 1628 | trace_rcu_utilization("Start CPU kthread@rcu_yield"); |
f8b7fc6b PM |
1629 | spincnt = 0; |
1630 | } | |
1631 | } | |
1632 | *statusp = RCU_KTHREAD_STOPPED; | |
385680a9 | 1633 | trace_rcu_utilization("End CPU kthread@term"); |
f8b7fc6b PM |
1634 | return 0; |
1635 | } | |
1636 | ||
1637 | /* | |
1638 | * Spawn a per-CPU kthread, setting up affinity and priority. | |
1639 | * Because the CPU hotplug lock is held, no other CPU will be attempting | |
1640 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | |
1641 | * attempting to access it during boot, but the locking in kthread_bind() | |
1642 | * will enforce sufficient ordering. | |
1643 | * | |
1644 | * Please note that we cannot simply refuse to wake up the per-CPU | |
1645 | * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, | |
1646 | * which can result in softlockup complaints if the task ends up being | |
1647 | * idle for more than a couple of minutes. | |
1648 | * | |
1649 | * However, please note also that we cannot bind the per-CPU kthread to its | |
1650 | * CPU until that CPU is fully online. We also cannot wait until the | |
1651 | * CPU is fully online before we create its per-CPU kthread, as this would | |
1652 | * deadlock the system when CPU notifiers tried waiting for grace | |
1653 | * periods. So we bind the per-CPU kthread to its CPU only if the CPU | |
1654 | * is online. If its CPU is not yet fully online, then the code in | |
1655 | * rcu_cpu_kthread() will wait until it is fully online, and then do | |
1656 | * the binding. | |
1657 | */ | |
1658 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | |
1659 | { | |
1660 | struct sched_param sp; | |
1661 | struct task_struct *t; | |
1662 | ||
b0d30417 | 1663 | if (!rcu_scheduler_fully_active || |
f8b7fc6b PM |
1664 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) |
1665 | return 0; | |
1f288094 ED |
1666 | t = kthread_create_on_node(rcu_cpu_kthread, |
1667 | (void *)(long)cpu, | |
1668 | cpu_to_node(cpu), | |
5b61b0ba | 1669 | "rcuc/%d", cpu); |
f8b7fc6b PM |
1670 | if (IS_ERR(t)) |
1671 | return PTR_ERR(t); | |
1672 | if (cpu_online(cpu)) | |
1673 | kthread_bind(t, cpu); | |
1674 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | |
1675 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | |
1676 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1677 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1678 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | |
1679 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ | |
1680 | return 0; | |
1681 | } | |
1682 | ||
1683 | /* | |
1684 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | |
1685 | * kthreads when needed. We ignore requests to wake up kthreads | |
1686 | * for offline CPUs, which is OK because force_quiescent_state() | |
1687 | * takes care of this case. | |
1688 | */ | |
1689 | static int rcu_node_kthread(void *arg) | |
1690 | { | |
1691 | int cpu; | |
1692 | unsigned long flags; | |
1693 | unsigned long mask; | |
1694 | struct rcu_node *rnp = (struct rcu_node *)arg; | |
1695 | struct sched_param sp; | |
1696 | struct task_struct *t; | |
1697 | ||
1698 | for (;;) { | |
1699 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | |
1700 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | |
1701 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | |
1702 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1703 | mask = atomic_xchg(&rnp->wakemask, 0); | |
1704 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | |
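/* For each CPU flagged in the snapshotted wakemask, mark it as having work and restore its per-CPU kthread to RT priority so that it runs; offline CPUs and missing kthreads are skipped. */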
1705 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | |
1706 | if ((mask & 0x1) == 0) | |
1707 | continue; | |
1708 | preempt_disable(); | |
1709 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1710 | if (!cpu_online(cpu) || t == NULL) { | |
1711 | preempt_enable(); | |
1712 | continue; | |
1713 | } | |
1714 | per_cpu(rcu_cpu_has_work, cpu) = 1; | |
1715 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1716 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1717 | preempt_enable(); | |
1718 | } | |
1719 | } | |
1720 | /* NOTREACHED */ | |
1721 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | |
1722 | return 0; | |
1723 | } | |
1724 | ||
1725 | /* | |
1726 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | |
1727 | * served by the rcu_node in question. The CPU hotplug lock is still | |
1728 | * held, so the value of rnp->qsmaskinit will be stable. | |
1729 | * | |
1730 | * We don't include outgoingcpu in the affinity set; use -1 if there is | |
1731 | * no outgoing CPU. If there are no CPUs left in the affinity set, | |
1732 | * this function allows the kthread to execute on any CPU. | |
1733 | */ | |
1734 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |
1735 | { | |
1736 | cpumask_var_t cm; | |
1737 | int cpu; | |
1738 | unsigned long mask = rnp->qsmaskinit; | |
1739 | ||
1740 | if (rnp->node_kthread_task == NULL) | |
1741 | return; | |
1742 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | |
1743 | return; | |
1744 | cpumask_clear(cm); | |
1745 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | |
1746 | if ((mask & 0x1) && cpu != outgoingcpu) | |
1747 | cpumask_set_cpu(cpu, cm); | |
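/* If no CPUs remain, allow the kthread to run on any CPU outside this rcu_node rather than leaving an empty affinity mask. */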
1748 | if (cpumask_weight(cm) == 0) { | |
1749 | cpumask_setall(cm); | |
1750 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | |
1751 | cpumask_clear_cpu(cpu, cm); | |
1752 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | |
1753 | } | |
1754 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | |
1755 | rcu_boost_kthread_setaffinity(rnp, cm); | |
1756 | free_cpumask_var(cm); | |
1757 | } | |
1758 | ||
1759 | /* | |
1760 | * Spawn a per-rcu_node kthread, setting priority and affinity. | |
1761 | * Called during boot before online/offline can happen, or, if | |
1762 | * during runtime, with the main CPU-hotplug locks held. So only | |
1763 | * one of these can be executing at a time. | |
1764 | */ | |
1765 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | |
1766 | struct rcu_node *rnp) | |
1767 | { | |
1768 | unsigned long flags; | |
1769 | int rnp_index = rnp - &rsp->node[0]; | |
1770 | struct sched_param sp; | |
1771 | struct task_struct *t; | |
1772 | ||
b0d30417 | 1773 | if (!rcu_scheduler_fully_active || |
f8b7fc6b PM |
1774 | rnp->qsmaskinit == 0) |
1775 | return 0; | |
1776 | if (rnp->node_kthread_task == NULL) { | |
1777 | t = kthread_create(rcu_node_kthread, (void *)rnp, | |
5b61b0ba | 1778 | "rcun/%d", rnp_index); |
f8b7fc6b PM |
1779 | if (IS_ERR(t)) |
1780 | return PTR_ERR(t); | |
1781 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1782 | rnp->node_kthread_task = t; | |
1783 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1784 | sp.sched_priority = 99; | |
1785 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1786 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | |
1787 | } | |
1788 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | |
1789 | } | |
1790 | ||
1791 | /* | |
1792 | * Spawn all kthreads -- called as soon as the scheduler is running. | |
1793 | */ | |
1794 | static int __init rcu_spawn_kthreads(void) | |
1795 | { | |
1796 | int cpu; | |
1797 | struct rcu_node *rnp; | |
1798 | ||
b0d30417 | 1799 | rcu_scheduler_fully_active = 1; |
f8b7fc6b PM |
1800 | for_each_possible_cpu(cpu) { |
1801 | per_cpu(rcu_cpu_has_work, cpu) = 0; | |
1802 | if (cpu_online(cpu)) | |
1803 | (void)rcu_spawn_one_cpu_kthread(cpu); | |
1804 | } | |
1805 | rnp = rcu_get_root(rcu_state); | |
1806 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1807 | if (NUM_RCU_NODES > 1) { | |
1808 | rcu_for_each_leaf_node(rcu_state, rnp) | |
1809 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1810 | } | |
1811 | return 0; | |
1812 | } | |
1813 | early_initcall(rcu_spawn_kthreads); | |
1814 | ||
1815 | static void __cpuinit rcu_prepare_kthreads(int cpu) | |
1816 | { | |
1817 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | |
1818 | struct rcu_node *rnp = rdp->mynode; | |
1819 | ||
1820 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | |
b0d30417 | 1821 | if (rcu_scheduler_fully_active) { |
f8b7fc6b PM |
1822 | (void)rcu_spawn_one_cpu_kthread(cpu); |
1823 | if (rnp->node_kthread_task == NULL) | |
1824 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1825 | } | |
1826 | } | |
1827 | ||
27f4d280 PM |
1828 | #else /* #ifdef CONFIG_RCU_BOOST */ |
1829 | ||
1217ed1b | 1830 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
27f4d280 | 1831 | { |
1217ed1b | 1832 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 PM |
1833 | } |
1834 | ||
a46e0899 | 1835 | static void invoke_rcu_callbacks_kthread(void) |
27f4d280 | 1836 | { |
a46e0899 | 1837 | WARN_ON_ONCE(1); |
27f4d280 PM |
1838 | } |
1839 | ||
dff1672d PM |
1840 | static bool rcu_is_callbacks_kthread(void) |
1841 | { | |
1842 | return false; | |
1843 | } | |
1844 | ||
27f4d280 PM |
1845 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) |
1846 | { | |
1847 | } | |
1848 | ||
f8b7fc6b PM |
1849 | #ifdef CONFIG_HOTPLUG_CPU |
1850 | ||
1851 | static void rcu_stop_cpu_kthread(int cpu) | |
1852 | { | |
1853 | } | |
1854 | ||
1855 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1856 | ||
1857 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |
1858 | { | |
1859 | } | |
1860 | ||
1861 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | |
1862 | { | |
1863 | } | |
1864 | ||
b0d30417 PM |
1865 | static int __init rcu_scheduler_really_started(void) |
1866 | { | |
1867 | rcu_scheduler_fully_active = 1; | |
1868 | return 0; | |
1869 | } | |
1870 | early_initcall(rcu_scheduler_really_started); | |
1871 | ||
f8b7fc6b PM |
1872 | static void __cpuinit rcu_prepare_kthreads(int cpu) |
1873 | { | |
1874 | } | |
1875 | ||
27f4d280 PM |
1876 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ |
1877 | ||
8bd93a2c PM |
1878 | #if !defined(CONFIG_RCU_FAST_NO_HZ) |
1879 | ||
1880 | /* | |
1881 | * Check to see if any future RCU-related work will need to be done | |
1882 | * by the current CPU, even if none need be done immediately, returning | |
1883 | * 1 if so. This function is part of the RCU implementation; it is -not- | |
1884 | * an exported member of the RCU API. | |
1885 | * | |
7cb92499 PM |
1886 | * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs |
1887 | * any flavor of RCU. | |
8bd93a2c | 1888 | */ |
aa9b1630 | 1889 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
8bd93a2c | 1890 | { |
aa9b1630 | 1891 | *delta_jiffies = ULONG_MAX; |
aea1b35e PM |
1892 | return rcu_cpu_has_callbacks(cpu); |
1893 | } | |
1894 | ||
7cb92499 PM |
1895 | /* |
1896 | * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it. | |
1897 | */ | |
1898 | static void rcu_prepare_for_idle_init(int cpu) | |
1899 | { | |
1900 | } | |
1901 | ||
1902 | /* | |
1903 | * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up | |
1904 | * after it. | |
1905 | */ | |
1906 | static void rcu_cleanup_after_idle(int cpu) | |
1907 | { | |
1908 | } | |
1909 | ||
aea1b35e | 1910 | /* |
a858af28 | 1911 | * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n, |
aea1b35e PM |
1912 | * is nothing. |
1913 | */ | |
1914 | static void rcu_prepare_for_idle(int cpu) | |
1915 | { | |
1916 | } | |
1917 | ||
c57afe80 PM |
1918 | /* |
1919 | * Don't bother keeping a running count of the number of RCU callbacks | |
1920 | * posted because CONFIG_RCU_FAST_NO_HZ=n. | |
1921 | */ | |
1922 | static void rcu_idle_count_callbacks_posted(void) | |
1923 | { | |
1924 | } | |
1925 | ||
8bd93a2c PM |
1926 | #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
1927 | ||
f23f7fa1 PM |
1928 | /* |
1929 | * This code is invoked when a CPU goes idle, at which point we want | |
1930 | * to have the CPU do everything required for RCU so that it can enter | |
1931 | * the energy-efficient dyntick-idle mode. This is handled by a | |
1932 | * state machine implemented by rcu_prepare_for_idle() below. | |
1933 | * | |
1934 | * The following four preprocessor symbols control this state machine: | |
1935 | * | |
1936 | * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt | |
1937 | * to satisfy RCU. Beyond this point, it is better to incur a periodic | |
1938 | * scheduling-clock interrupt than to loop through the state machine | |
1939 | * at full power. | |
1940 | * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are | |
1941 | * optional if RCU does not need anything immediately from this | |
1942 | * CPU, even if this CPU still has RCU callbacks queued. The first | |
1943 | * times through the state machine are mandatory: we need to give | |
1944 | * the state machine a chance to communicate a quiescent state | |
1945 | * to the RCU core. | |
1946 | * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted | |
1947 | * to sleep in dyntick-idle mode with RCU callbacks pending. This | |
1948 | * is sized to be roughly one RCU grace period. Those energy-efficiency | |
1949 | * benchmarkers who might otherwise be tempted to set this to a large | |
1950 | * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your | |
1951 | * system. And if you are -that- concerned about energy efficiency, | |
1952 | * just power the system down and be done with it! | |
778d250a PM |
1953 | * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is |
1954 | * permitted to sleep in dyntick-idle mode with only lazy RCU | |
1955 | * callbacks pending. Setting this too high can OOM your system. | |
f23f7fa1 PM |
1956 | * |
1957 | * The values below work well in practice. If future workloads require | |
1958 | * adjustment, they can be converted into kernel config parameters, though | |
1959 | * making the state machine smarter might be a better option. | |
1960 | */ | |
1961 | #define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */ | |
1962 | #define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */ | |
7cb92499 | 1963 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ |
778d250a | 1964 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ |
f23f7fa1 | 1965 | |
486e2593 PM |
1966 | /* |
1967 | * Does the specified flavor of RCU have non-lazy callbacks pending on | |
1968 | * the specified CPU? Both RCU flavor and CPU are specified by the | |
1969 | * rcu_data structure. | |
1970 | */ | |
1971 | static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp) | |
1972 | { | |
1973 | return rdp->qlen != rdp->qlen_lazy; | |
1974 | } | |
1975 | ||
1976 | #ifdef CONFIG_TREE_PREEMPT_RCU | |
1977 | ||
1978 | /* | |
1979 | * Are there non-lazy RCU-preempt callbacks? (There cannot be if there | |
1980 | * is no RCU-preempt in the kernel.) | |
1981 | */ | |
1982 | static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu) | |
1983 | { | |
1984 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | |
1985 | ||
1986 | return __rcu_cpu_has_nonlazy_callbacks(rdp); | |
1987 | } | |
1988 | ||
1989 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | |
1990 | ||
1991 | static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu) | |
1992 | { | |
1993 | return false; | |
1994 | } | |
1995 | ||
1996 | #endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */ | |
1997 | ||
1998 | /* | |
1999 | * Does any flavor of RCU have non-lazy callbacks on the specified CPU? | |
2000 | */ | |
2001 | static bool rcu_cpu_has_nonlazy_callbacks(int cpu) | |
2002 | { | |
2003 | return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) || | |
2004 | __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) || | |
2005 | rcu_preempt_cpu_has_nonlazy_callbacks(cpu); | |
2006 | } | |
2007 | ||
aa9b1630 PM |
2008 | /* |
2009 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | |
2010 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | |
2011 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | |
2012 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | |
2013 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | |
2014 | * it is better to incur scheduling-clock interrupts than to spin | |
2015 | * continuously for the same time duration! | |
2016 | * | |
2017 | * The delta_jiffies argument is used to store the time when RCU is | |
2018 | * going to need the CPU again if it still has callbacks. The reason | |
2019 | * for this is that rcu_prepare_for_idle() might need to post a timer, | |
2020 | * but if so, it will do so after tick_nohz_stop_sched_tick() has set | |
2021 | * the wakeup time for this CPU. This means that RCU's timer can be | |
2022 | * delayed until the wakeup time, which defeats the purpose of posting | |
2023 | * a timer. | |
2024 | */ | |
2025 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) | |
2026 | { | |
2027 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | |
2028 | ||
2029 | /* Flag a new idle sojourn to the idle-entry state machine. */ | |
2030 | rdtp->idle_first_pass = 1; | |
2031 | /* If no callbacks, RCU doesn't need the CPU. */ | |
2032 | if (!rcu_cpu_has_callbacks(cpu)) { | |
2033 | *delta_jiffies = ULONG_MAX; | |
2034 | return 0; | |
2035 | } | |
2036 | if (rdtp->dyntick_holdoff == jiffies) { | |
2037 | /* RCU recently tried and failed, so don't try again. */ | |
2038 | *delta_jiffies = 1; | |
2039 | return 1; | |
2040 | } | |
2041 | /* Set up for the possibility that RCU will post a timer. */ | |
2042 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | |
2043 | *delta_jiffies = RCU_IDLE_GP_DELAY; | |
2044 | else | |
2045 | *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY; | |
2046 | return 0; | |
2047 | } | |
2048 | ||
21e52e15 PM |
2049 | /* |
2050 | * Handler for smp_call_function_single(). The only point of this | |
2051 | * handler is to wake the CPU up, so the handler does only tracing. | |
2052 | */ | |
2053 | void rcu_idle_demigrate(void *unused) | |
2054 | { | |
2055 | trace_rcu_prep_idle("Demigrate"); | |
2056 | } | |
2057 | ||
7cb92499 PM |
2058 | /* |
2059 | * Timer handler used to force CPU to start pushing its remaining RCU | |
2060 | * callbacks in the case where it entered dyntick-idle mode with callbacks | |
2061 | * pending. The handler doesn't really need to do anything because the | |
2062 | * real work is done upon re-entry to idle, or by the next scheduling-clock | |
2063 | * interrupt should idle not be re-entered. | |
21e52e15 PM |
2064 | * |
2065 | * One special case: the timer gets migrated without awakening the CPU | |
2066 | * on which the timer was scheduled. In this case, we must wake up | |
2067 | * that CPU. We do so with smp_call_function_single(). | |
7cb92499 | 2068 | */ |
21e52e15 | 2069 | static void rcu_idle_gp_timer_func(unsigned long cpu_in) |
7cb92499 | 2070 | { |
21e52e15 PM |
2071 | int cpu = (int)cpu_in; |
2072 | ||
7cb92499 | 2073 | trace_rcu_prep_idle("Timer"); |
21e52e15 PM |
2074 | if (cpu != smp_processor_id()) |
2075 | smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0); | |
2076 | else | |
2077 | WARN_ON_ONCE(1); /* Getting here can hang the system... */ | |
7cb92499 PM |
2078 | } |
2079 | ||
2080 | /* | |
2081 | * Initialize the timer used to pull CPUs out of dyntick-idle mode. | |
2082 | */ | |
2083 | static void rcu_prepare_for_idle_init(int cpu) | |
2084 | { | |
5955f7ee PM |
2085 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2086 | ||
2087 | rdtp->dyntick_holdoff = jiffies - 1; | |
2088 | setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu); | |
2089 | rdtp->idle_gp_timer_expires = jiffies - 1; | |
2090 | rdtp->idle_first_pass = 1; | |
7cb92499 PM |
2091 | } |
2092 | ||
2093 | /* | |
2094 | * Clean up for exit from idle. Because we are exiting from idle, there | |
5955f7ee | 2095 | * is no longer any point to ->idle_gp_timer, so cancel it. This will |
7cb92499 PM |
2096 | * do nothing if this timer is not active, so just cancel it unconditionally. |
2097 | */ | |
2098 | static void rcu_cleanup_after_idle(int cpu) | |
2099 | { | |
5955f7ee PM |
2100 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2101 | ||
2102 | del_timer(&rdtp->idle_gp_timer); | |
2fdbb31b | 2103 | trace_rcu_prep_idle("Cleanup after idle"); |
7cb92499 PM |
2104 | } |
2105 | ||
aea1b35e PM |
2106 | /* |
2107 | * Check to see if any RCU-related work can be done by the current CPU, | |
2108 | * and if so, schedule a softirq to get it done. This function is part | |
2109 | * of the RCU implementation; it is -not- an exported member of the RCU API. | |
8bd93a2c | 2110 | * |
aea1b35e PM |
2111 | * The idea is for the current CPU to clear out all work required by the |
2112 | * RCU core for the current grace period, so that this CPU can be permitted | |
2113 | * to enter dyntick-idle mode. In some cases, it will need to be awakened | |
2114 | * at the end of the grace period by whatever CPU ends the grace period. | |
2115 | * This allows CPUs to go dyntick-idle more quickly, and to reduce the | |
2116 | * number of wakeups by a modest integer factor. | |
a47cd880 PM |
2117 | * |
2118 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | |
2119 | * disabled, we do one pass of force_quiescent_state(), then call | |
a46e0899 | 2120 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked | |
5955f7ee | 2121 | * later. The ->dyntick_drain field controls the sequencing. | |
aea1b35e PM |
2122 | * |
2123 | * The caller must have disabled interrupts. | |
8bd93a2c | 2124 | */ |
aea1b35e | 2125 | static void rcu_prepare_for_idle(int cpu) |
8bd93a2c | 2126 | { |
f511fc62 | 2127 | struct timer_list *tp; |
5955f7ee | 2128 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
f511fc62 | 2129 | |
c57afe80 PM |
2130 | /* |
2131 | * If this is an idle re-entry, for example, due to use of | |
2132 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle | |
2133 | * loop, then don't take any state-machine actions, unless the | |
2134 | * momentary exit from idle queued additional non-lazy callbacks. | |
5955f7ee | 2135 | * Instead, repost the ->idle_gp_timer if this CPU has callbacks |
c57afe80 PM |
2136 | * pending. |
2137 | */ | |
5955f7ee PM |
2138 | if (!rdtp->idle_first_pass && |
2139 | (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) { | |
f511fc62 | 2140 | if (rcu_cpu_has_callbacks(cpu)) { |
5955f7ee PM |
2141 | tp = &rdtp->idle_gp_timer; |
2142 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); | |
f511fc62 | 2143 | } |
c57afe80 PM |
2144 | return; |
2145 | } | |
5955f7ee PM |
2146 | rdtp->idle_first_pass = 0; |
2147 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1; | |
c57afe80 | 2148 | |
3084f2f8 | 2149 | /* |
f535a607 PM |
2150 | * If there are no callbacks on this CPU, enter dyntick-idle mode. |
2151 | * Also reset state to avoid prejudicing later attempts. | |
3084f2f8 | 2152 | */ |
aea1b35e | 2153 | if (!rcu_cpu_has_callbacks(cpu)) { |
5955f7ee PM |
2154 | rdtp->dyntick_holdoff = jiffies - 1; |
2155 | rdtp->dyntick_drain = 0; | |
433cdddc | 2156 | trace_rcu_prep_idle("No callbacks"); |
aea1b35e | 2157 | return; |
77e38ed3 | 2158 | } |
3084f2f8 PM |
2159 | |
2160 | /* | |
2161 | * If in holdoff mode, just return. We will presumably have | |
2162 | * refrained from disabling the scheduling-clock tick. | |
2163 | */ | |
5955f7ee | 2164 | if (rdtp->dyntick_holdoff == jiffies) { |
433cdddc | 2165 | trace_rcu_prep_idle("In holdoff"); |
aea1b35e | 2166 | return; |
433cdddc | 2167 | } |
a47cd880 | 2168 | |
5955f7ee PM |
2169 | /* Check and update the ->dyntick_drain sequencing. */ |
2170 | if (rdtp->dyntick_drain <= 0) { | |
a47cd880 | 2171 | /* First time through, initialize the counter. */ |
5955f7ee PM |
2172 | rdtp->dyntick_drain = RCU_IDLE_FLUSHES; |
2173 | } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES && | |
c3ce910b PM |
2174 | !rcu_pending(cpu) && |
2175 | !local_softirq_pending()) { | |
7cb92499 | 2176 | /* Can we go dyntick-idle despite still having callbacks? */ |
5955f7ee PM |
2177 | rdtp->dyntick_drain = 0; |
2178 | rdtp->dyntick_holdoff = jiffies; | |
fd4b3526 PM |
2179 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) { |
2180 | trace_rcu_prep_idle("Dyntick with callbacks"); | |
5955f7ee | 2181 | rdtp->idle_gp_timer_expires = |
c57afe80 | 2182 | jiffies + RCU_IDLE_GP_DELAY; |
fd4b3526 | 2183 | } else { |
5955f7ee | 2184 | rdtp->idle_gp_timer_expires = |
c57afe80 | 2185 | jiffies + RCU_IDLE_LAZY_GP_DELAY; |
fd4b3526 PM |
2186 | trace_rcu_prep_idle("Dyntick with lazy callbacks"); |
2187 | } | |
5955f7ee PM |
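/* Arm a timer pinned to this CPU so that it is forced out of dyntick-idle mode in time to process its remaining callbacks. */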
2188 | tp = &rdtp->idle_gp_timer; |
2189 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); | |
2190 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; | |
f23f7fa1 | 2191 | return; /* Nothing more to do immediately. */ |
5955f7ee | 2192 | } else if (--(rdtp->dyntick_drain) <= 0) { |
a47cd880 | 2193 | /* We have hit the limit, so time to give up. */ |
5955f7ee | 2194 | rdtp->dyntick_holdoff = jiffies; |
433cdddc | 2195 | trace_rcu_prep_idle("Begin holdoff"); |
aea1b35e PM |
2196 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ |
2197 | return; | |
a47cd880 PM |
2198 | } |
2199 | ||
aea1b35e PM |
2200 | /* |
2201 | * Do one step of pushing the remaining RCU callbacks through | |
2202 | * the RCU core state machine. | |
2203 | */ | |
2204 | #ifdef CONFIG_TREE_PREEMPT_RCU | |
2205 | if (per_cpu(rcu_preempt_data, cpu).nxtlist) { | |
2206 | rcu_preempt_qs(cpu); | |
2207 | force_quiescent_state(&rcu_preempt_state, 0); | |
aea1b35e PM |
2208 | } |
2209 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | |
a47cd880 PM |
2210 | if (per_cpu(rcu_sched_data, cpu).nxtlist) { |
2211 | rcu_sched_qs(cpu); | |
2212 | force_quiescent_state(&rcu_sched_state, 0); | |
a47cd880 PM |
2213 | } |
2214 | if (per_cpu(rcu_bh_data, cpu).nxtlist) { | |
2215 | rcu_bh_qs(cpu); | |
2216 | force_quiescent_state(&rcu_bh_state, 0); | |
8bd93a2c PM |
2217 | } |
2218 | ||
433cdddc PM |
2219 | /* |
2220 | * If RCU callbacks are still pending, RCU still needs this CPU. | |
2221 | * So try forcing the callbacks through the grace period. | |
2222 | */ | |
3ad0decf | 2223 | if (rcu_cpu_has_callbacks(cpu)) { |
433cdddc | 2224 | trace_rcu_prep_idle("More callbacks"); |
a46e0899 | 2225 | invoke_rcu_core(); |
c0cfbbb0 | 2226 | } else |
433cdddc | 2227 | trace_rcu_prep_idle("Callbacks drained"); |
8bd93a2c PM |
2228 | } |
2229 | ||
c57afe80 | 2230 | /* |
98248a0e PM |
2231 | * Keep a running count of the number of non-lazy callbacks posted |
2232 | * on this CPU. This running counter (which is never decremented) allows | |
2233 | * rcu_prepare_for_idle() to detect when something out of the idle loop | |
2234 | * posts a callback, even if an equal number of callbacks are invoked. | |
2235 | * Of course, callbacks should only be posted from within a trace event | |
2236 | * designed to be called from idle or from within RCU_NONIDLE(). | |
c57afe80 PM |
2237 | */ |
2238 | static void rcu_idle_count_callbacks_posted(void) | |
2239 | { | |
5955f7ee | 2240 | __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); |
c57afe80 PM |
2241 | } |
2242 | ||
8bd93a2c | 2243 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
a858af28 PM |
2244 | |
2245 | #ifdef CONFIG_RCU_CPU_STALL_INFO | |
2246 | ||
2247 | #ifdef CONFIG_RCU_FAST_NO_HZ | |
2248 | ||
2249 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) | |
2250 | { | |
5955f7ee PM |
2251 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2252 | struct timer_list *tltp = &rdtp->idle_gp_timer; | |
a858af28 | 2253 | |
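/* Report the drain-sequence count, an 'H' if this CPU is in dyntick holdoff, and the jiffies until the idle GP timer fires (-1 if not pending). */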
2ee3dc80 | 2254 | sprintf(cp, "drain=%d %c timer=%lu", |
5955f7ee PM |
2255 | rdtp->dyntick_drain, |
2256 | rdtp->dyntick_holdoff == jiffies ? 'H' : '.', | |
2ee3dc80 | 2257 | timer_pending(tltp) ? tltp->expires - jiffies : -1); |
a858af28 PM |
2258 | } |
2259 | ||
2260 | #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | |
2261 | ||
2262 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) | |
2263 | { | |
2264 | } | |
2265 | ||
2266 | #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ | |
2267 | ||
2268 | /* Initiate the stall-info list. */ | |
2269 | static void print_cpu_stall_info_begin(void) | |
2270 | { | |
2271 | printk(KERN_CONT "\n"); | |
2272 | } | |
2273 | ||
2274 | /* | |
2275 | * Print out diagnostic information for the specified stalled CPU. | |
2276 | * | |
2277 | * If the specified CPU is aware of the current RCU grace period | |
2278 | * (flavor specified by rsp), then print the number of scheduling | |
2279 | * clock interrupts the CPU has taken during the time that it has | |
2280 | * been aware. Otherwise, print the number of RCU grace periods | |
2281 | * that this CPU is ignorant of, for example, "1" if the CPU was | |
2282 | * aware of the previous grace period. | |
2283 | * | |
2284 | * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. | |
2285 | */ | |
2286 | static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) | |
2287 | { | |
2288 | char fast_no_hz[72]; | |
2289 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | |
2290 | struct rcu_dynticks *rdtp = rdp->dynticks; | |
2291 | char *ticks_title; | |
2292 | unsigned long ticks_value; | |
2293 | ||
2294 | if (rsp->gpnum == rdp->gpnum) { | |
2295 | ticks_title = "ticks this GP"; | |
2296 | ticks_value = rdp->ticks_this_gp; | |
2297 | } else { | |
2298 | ticks_title = "GPs behind"; | |
2299 | ticks_value = rsp->gpnum - rdp->gpnum; | |
2300 | } | |
2301 | print_cpu_stall_fast_no_hz(fast_no_hz, cpu); | |
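/* The idle= field below shows the low-order bits of the dynticks counter together with the dynticks_nesting and dynticks_nmi_nesting counts. */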
2302 | printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n", | |
2303 | cpu, ticks_value, ticks_title, | |
2304 | atomic_read(&rdtp->dynticks) & 0xfff, | |
2305 | rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, | |
2306 | fast_no_hz); | |
2307 | } | |
2308 | ||
2309 | /* Terminate the stall-info list. */ | |
2310 | static void print_cpu_stall_info_end(void) | |
2311 | { | |
2312 | printk(KERN_ERR "\t"); | |
2313 | } | |
2314 | ||
2315 | /* Zero ->ticks_this_gp for all flavors of RCU. */ | |
2316 | static void zero_cpu_stall_ticks(struct rcu_data *rdp) | |
2317 | { | |
2318 | rdp->ticks_this_gp = 0; | |
2319 | } | |
2320 | ||
2321 | /* Increment ->ticks_this_gp for all flavors of RCU. */ | |
2322 | static void increment_cpu_stall_ticks(void) | |
2323 | { | |
2324 | __get_cpu_var(rcu_sched_data).ticks_this_gp++; | |
2325 | __get_cpu_var(rcu_bh_data).ticks_this_gp++; | |
2326 | #ifdef CONFIG_TREE_PREEMPT_RCU | |
2327 | __get_cpu_var(rcu_preempt_data).ticks_this_gp++; | |
2328 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | |
2329 | } | |
2330 | ||
2331 | #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ | |
2332 | ||
2333 | static void print_cpu_stall_info_begin(void) | |
2334 | { | |
2335 | printk(KERN_CONT " {"); | |
2336 | } | |
2337 | ||
2338 | static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) | |
2339 | { | |
2340 | printk(KERN_CONT " %d", cpu); | |
2341 | } | |
2342 | ||
2343 | static void print_cpu_stall_info_end(void) | |
2344 | { | |
2345 | printk(KERN_CONT "} "); | |
2346 | } | |
2347 | ||
2348 | static void zero_cpu_stall_ticks(struct rcu_data *rdp) | |
2349 | { | |
2350 | } | |
2351 | ||
2352 | static void increment_cpu_stall_ticks(void) | |
2353 | { | |
2354 | } | |
2355 | ||
2356 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */ |