rcu: Remove redundant code from rcu_cleanup_after_idle()
kernel/rcutree_plugin.h
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "time/tick-internal.h"

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

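/*
 * Illustrative note (not part of the original file): on kernels built
 * with CONFIG_RCU_NOCB_CPU, rcu_nocb_mask is normally populated from the
 * "rcu_nocbs=" boot parameter, with "rcu_nocb_poll" selecting polling,
 * for example (the CPU list here is purely illustrative):
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 */
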
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	pr_info("\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
		CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	pr_info("\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	pr_info("\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	pr_info("\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	pr_info("\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
#ifndef CONFIG_RCU_NOCB_CPU_NONE
	if (!have_rcu_nocb_mask) {
		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
		have_rcu_nocb_mask = true;
	}
#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_setall(rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
	if (have_rcu_nocb_mask) {
		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
		if (rcu_nocb_poll)
			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
	}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}

#ifdef CONFIG_TREE_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or a task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case root is being boosted and leaf was not.  Make sure
	 * that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

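/*
 * Illustrative note (not part of the original file): callers do not
 * invoke kfree_call_rcu() directly; they reach it through the kfree_rcu()
 * macro, which encodes the offset of the rcu_head within the enclosing
 * structure.  With a hypothetical "struct foo" containing a member
 * "struct rcu_head rh", the call_rcu()-plus-kfree() pair collapses to:
 *
 *	kfree_rcu(fp, rh);
 */
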
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

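/*
 * Illustrative usage sketch (not part of the original file): a common
 * update-side pattern unlinks an element under a lock, waits for a grace
 * period, and only then frees it.  The foo_lock, fp, and ->list names
 * below are hypothetical:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&fp->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();	(all pre-existing readers have now finished)
 *	kfree(fp);
 */
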
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

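/*
 * Illustrative note (not part of the original file): as the rcu_expedited
 * test in synchronize_rcu() above shows, normal grace-period waits can be
 * redirected here system-wide, so the same update-side pattern applies,
 * trading CPU disturbance for latency:
 *
 *	list_del_rcu(&fp->list);
 *	synchronize_rcu_expedited();	(faster, but disturbs all CPUs)
 *	kfree(fp);
 */
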
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

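/*
 * Illustrative usage sketch (not part of the original file): a module
 * that posts call_rcu() callbacks touching module data or text should
 * invoke rcu_barrier() on its way out, so that no callback runs after
 * the module is gone.  The foo_exit() name below is hypothetical:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		(stop posting new call_rcu() callbacks)
 *		rcu_barrier();
 *		(now safe to free remaining data and unload)
 *	}
 */
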
/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

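/*
 * Illustrative arithmetic (not part of the original file): the macro
 * above converts the millisecond CONFIG_RCU_BOOST_DELAY value to jiffies,
 * rounding up.  For example, with CONFIG_RCU_BOOST_DELAY=500 and HZ=250:
 *
 *	DIV_ROUND_UP(500 * 250, 1000) == 125 jiffies, i.e. 500ms.
 */
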
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __get_cpu_var(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

1445 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1446 * served by the rcu_node in question. The CPU hotplug lock is still
1447 * held, so the value of rnp->qsmaskinit will be stable.
1448 *
1449 * We don't include outgoingcpu in the affinity set, use -1 if there is
1450 * no outgoing CPU. If there are no CPUs left in the affinity set,
1451 * this function allows the kthread to execute on any CPU.
1452 */
5d01bbd1 1453static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
f8b7fc6b 1454{
5d01bbd1
TG
1455 struct task_struct *t = rnp->boost_kthread_task;
1456 unsigned long mask = rnp->qsmaskinit;
f8b7fc6b
PM
1457 cpumask_var_t cm;
1458 int cpu;
f8b7fc6b 1459
5d01bbd1 1460 if (!t)
f8b7fc6b 1461 return;
5d01bbd1 1462 if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
f8b7fc6b 1463 return;
f8b7fc6b
PM
1464 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1465 if ((mask & 0x1) && cpu != outgoingcpu)
1466 cpumask_set_cpu(cpu, cm);
1467 if (cpumask_weight(cm) == 0) {
1468 cpumask_setall(cm);
1469 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1470 cpumask_clear_cpu(cpu, cm);
1471 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1472 }
5d01bbd1 1473 set_cpus_allowed_ptr(t, cm);
f8b7fc6b
PM
1474 free_cpumask_var(cm);
1475}
1476
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

8bd93a2c
PM
1556#if !defined(CONFIG_RCU_FAST_NO_HZ)
1557
1558/*
1559 * Check to see if any future RCU-related work will need to be done
1560 * by the current CPU, even if none need be done immediately, returning
1561 * 1 if so. This function is part of the RCU implementation; it is -not-
1562 * an exported member of the RCU API.
1563 *
7cb92499
PM
 1564 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1565 * any flavor of RCU.
8bd93a2c 1566 */
aa9b1630 1567int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
8bd93a2c 1568{
aa9b1630 1569 *delta_jiffies = ULONG_MAX;
c0f4dfd4 1570 return rcu_cpu_has_callbacks(cpu, NULL);
7cb92499
PM
1571}
1572
1573/*
1574 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1575 * after it.
1576 */
1577static void rcu_cleanup_after_idle(int cpu)
1578{
1579}
1580
aea1b35e 1581/*
a858af28 1582 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
aea1b35e
PM
1583 * is nothing.
1584 */
1585static void rcu_prepare_for_idle(int cpu)
1586{
1587}
1588
c57afe80
PM
1589/*
1590 * Don't bother keeping a running count of the number of RCU callbacks
1591 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1592 */
1593static void rcu_idle_count_callbacks_posted(void)
1594{
1595}
1596
8bd93a2c
PM
1597#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1598
f23f7fa1
PM
1599/*
1600 * This code is invoked when a CPU goes idle, at which point we want
1601 * to have the CPU do everything required for RCU so that it can enter
1602 * the energy-efficient dyntick-idle mode. This is handled by a
1603 * state machine implemented by rcu_prepare_for_idle() below.
1604 *
 1605 * The following two preprocessor symbols control this state machine:
1606 *
f23f7fa1
PM
1607 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1608 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1609 * is sized to be roughly one RCU grace period. Those energy-efficiency
1610 * benchmarkers who might otherwise be tempted to set this to a large
1611 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1612 * system. And if you are -that- concerned about energy efficiency,
1613 * just power the system down and be done with it!
778d250a
PM
1614 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1615 * permitted to sleep in dyntick-idle mode with only lazy RCU
1616 * callbacks pending. Setting this too high can OOM your system.
f23f7fa1
PM
1617 *
1618 * The values below work well in practice. If future workloads require
1619 * adjustment, they can be converted into kernel config parameters, though
1620 * making the state machine smarter might be a better option.
1621 */
e84c48ae 1622#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
778d250a 1623#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
f23f7fa1 1624
5e44ce35
PM
1625static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1626module_param(rcu_idle_gp_delay, int, 0644);
1627static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1628module_param(rcu_idle_lazy_gp_delay, int, 0644);
486e2593 1629
9d2ad243 1630extern int tick_nohz_enabled;
486e2593
PM
1631
1632/*
c0f4dfd4
PM
1633 * Try to advance callbacks for all flavors of RCU on the current CPU.
1634 * Afterwards, if there are any callbacks ready for immediate invocation,
1635 * return true.
486e2593 1636 */
c0f4dfd4 1637static bool rcu_try_advance_all_cbs(void)
486e2593 1638{
c0f4dfd4
PM
1639 bool cbs_ready = false;
1640 struct rcu_data *rdp;
1641 struct rcu_node *rnp;
1642 struct rcu_state *rsp;
486e2593 1643
c0f4dfd4
PM
1644 for_each_rcu_flavor(rsp) {
1645 rdp = this_cpu_ptr(rsp->rda);
1646 rnp = rdp->mynode;
486e2593 1647
c0f4dfd4
PM
1648 /*
1649 * Don't bother checking unless a grace period has
1650 * completed since we last checked and there are
1651 * callbacks not yet ready to invoke.
1652 */
1653 if (rdp->completed != rnp->completed &&
1654 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
470716fc 1655 note_gp_changes(rsp, rdp);
486e2593 1656
c0f4dfd4
PM
1657 if (cpu_has_callbacks_ready_to_invoke(rdp))
1658 cbs_ready = true;
1659 }
1660 return cbs_ready;
486e2593
PM
1661}
1662
aa9b1630 1663/*
c0f4dfd4
PM
1664 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1665 * to invoke. If the CPU has callbacks, try to advance them. Tell the
1666 * caller to set the timeout based on whether or not there are non-lazy
1667 * callbacks.
aa9b1630 1668 *
c0f4dfd4 1669 * The caller must have disabled interrupts.
aa9b1630 1670 */
c0f4dfd4 1671int rcu_needs_cpu(int cpu, unsigned long *dj)
aa9b1630
PM
1672{
1673 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1674
c0f4dfd4
PM
1675 /* Snapshot to detect later posting of non-lazy callback. */
1676 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1677
aa9b1630 1678 /* If no callbacks, RCU doesn't need the CPU. */
c0f4dfd4
PM
1679 if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
1680 *dj = ULONG_MAX;
aa9b1630
PM
1681 return 0;
1682 }
c0f4dfd4
PM
1683
1684 /* Attempt to advance callbacks. */
1685 if (rcu_try_advance_all_cbs()) {
1686 /* Some ready to invoke, so initiate later invocation. */
1687 invoke_rcu_core();
aa9b1630
PM
1688 return 1;
1689 }
c0f4dfd4
PM
1690 rdtp->last_accelerate = jiffies;
1691
1692 /* Request timer delay depending on laziness, and round. */
6faf7283 1693 if (!rdtp->all_lazy) {
c0f4dfd4
PM
1694 *dj = round_up(rcu_idle_gp_delay + jiffies,
1695 rcu_idle_gp_delay) - jiffies;
e84c48ae 1696 } else {
c0f4dfd4 1697 *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
e84c48ae 1698 }
aa9b1630
PM
1699 return 0;
1700}
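/*
 * A standalone userspace sketch (not part of this file) of the non-lazy
 * timeout arithmetic above.  round_up_pow2() mirrors the kernel's
 * round_up(), which requires a power-of-two alignment -- satisfied by
 * RCU_IDLE_GP_DELAY == 4.  The point: every idle CPU with non-lazy
 * callbacks wakes on the same 4-jiffy boundary, so their timers batch.
 */
#include <stdio.h>

#define IDLE_GP_DELAY 4UL	/* stand-in for rcu_idle_gp_delay */

static unsigned long round_up_pow2(unsigned long x, unsigned long y)
{
	return (x + y - 1) & ~(y - 1);
}

int main(void)
{
	unsigned long j;

	for (j = 1000; j < 1004; j++)
		printf("jiffies=%lu: sleep %lu, wake at %lu\n", j,
		       round_up_pow2(j + IDLE_GP_DELAY, IDLE_GP_DELAY) - j,
		       round_up_pow2(j + IDLE_GP_DELAY, IDLE_GP_DELAY));
	return 0;	/* all four CPUs wake at jiffy 1004 or 1008 */
}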
1701
21e52e15 1702/*
c0f4dfd4
PM
1703 * Prepare a CPU for idle from an RCU perspective. The first major task
1704 * is to sense whether nohz mode has been enabled or disabled via sysfs.
1705 * The second major task is to check to see if a non-lazy callback has
1706 * arrived at a CPU that previously had only lazy callbacks. The third
1707 * major task is to accelerate (that is, assign grace-period numbers to)
1708 * any recently arrived callbacks.
aea1b35e
PM
1709 *
1710 * The caller must have disabled interrupts.
8bd93a2c 1711 */
aea1b35e 1712static void rcu_prepare_for_idle(int cpu)
8bd93a2c 1713{
c0f4dfd4 1714 struct rcu_data *rdp;
5955f7ee 1715 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
c0f4dfd4
PM
1716 struct rcu_node *rnp;
1717 struct rcu_state *rsp;
9d2ad243
PM
1718 int tne;
1719
1720 /* Handle nohz enablement switches conservatively. */
1721 tne = ACCESS_ONCE(tick_nohz_enabled);
1722 if (tne != rdtp->tick_nohz_enabled_snap) {
c0f4dfd4 1723 if (rcu_cpu_has_callbacks(cpu, NULL))
9d2ad243
PM
1724 invoke_rcu_core(); /* force nohz to see update. */
1725 rdtp->tick_nohz_enabled_snap = tne;
1726 return;
1727 }
1728 if (!tne)
1729 return;
f511fc62 1730
c0f4dfd4 1731 /* If this is a no-CBs CPU, no callbacks, just return. */
534c97b0 1732 if (rcu_is_nocb_cpu(cpu))
9a0c6fef 1733 return;
9a0c6fef 1734
c57afe80 1735 /*
c0f4dfd4
PM
1736 * If a non-lazy callback arrived at a CPU having only lazy
1737 * callbacks, invoke RCU core for the side-effect of recalculating
1738 * idle duration on re-entry to idle.
c57afe80 1739 */
c0f4dfd4
PM
1740 if (rdtp->all_lazy &&
1741 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1742 invoke_rcu_core();
c57afe80
PM
1743 return;
1744 }
c57afe80 1745
3084f2f8 1746 /*
c0f4dfd4
PM
1747 * If we have not yet accelerated this jiffy, accelerate all
1748 * callbacks on this CPU.
3084f2f8 1749 */
c0f4dfd4 1750 if (rdtp->last_accelerate == jiffies)
aea1b35e 1751 return;
c0f4dfd4
PM
1752 rdtp->last_accelerate = jiffies;
1753 for_each_rcu_flavor(rsp) {
1754 rdp = per_cpu_ptr(rsp->rda, cpu);
1755 if (!*rdp->nxttail[RCU_DONE_TAIL])
1756 continue;
1757 rnp = rdp->mynode;
1758 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1759 rcu_accelerate_cbs(rsp, rnp, rdp);
1760 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77e38ed3 1761 }
c0f4dfd4 1762}
3084f2f8 1763
c0f4dfd4
PM
1764/*
1765 * Clean up for exit from idle. Attempt to advance callbacks based on
1766 * any grace periods that elapsed while the CPU was idle, and if any
1767 * callbacks are now ready to invoke, initiate invocation.
1768 */
1769static void rcu_cleanup_after_idle(int cpu)
1770{
a47cd880 1771
534c97b0 1772 if (rcu_is_nocb_cpu(cpu))
aea1b35e 1773 return;
7a497c96
PM
1774 if (rcu_try_advance_all_cbs())
1775 invoke_rcu_core();
8bd93a2c
PM
1776}
1777
c57afe80 1778/*
98248a0e
PM
1779 * Keep a running count of the number of non-lazy callbacks posted
1780 * on this CPU. This running counter (which is never decremented) allows
1781 * rcu_prepare_for_idle() to detect when something out of the idle loop
1782 * posts a callback, even if an equal number of callbacks are invoked.
1783 * Of course, callbacks should only be posted from within a trace event
1784 * designed to be called from idle or from within RCU_NONIDLE().
c57afe80
PM
1785 */
1786static void rcu_idle_count_callbacks_posted(void)
1787{
5955f7ee 1788 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
c57afe80
PM
1789}
1790
b626c1b6
PM
1791/*
1792 * Data for flushing lazy RCU callbacks at OOM time.
1793 */
1794static atomic_t oom_callback_count;
1795static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1796
1797/*
1798 * RCU OOM callback -- decrement the outstanding count and deliver the
1799 * wake-up if we are the last one.
1800 */
1801static void rcu_oom_callback(struct rcu_head *rhp)
1802{
1803 if (atomic_dec_and_test(&oom_callback_count))
1804 wake_up(&oom_callback_wq);
1805}
1806
1807/*
1808 * Post an rcu_oom_notify callback on the current CPU if it has at
1809 * least one lazy callback. This will unnecessarily post callbacks
1810 * to CPUs that already have a non-lazy callback at the end of their
1811 * callback list, but this is an infrequent operation, so accept some
1812 * extra overhead to keep things simple.
1813 */
1814static void rcu_oom_notify_cpu(void *unused)
1815{
1816 struct rcu_state *rsp;
1817 struct rcu_data *rdp;
1818
1819 for_each_rcu_flavor(rsp) {
1820 rdp = __this_cpu_ptr(rsp->rda);
1821 if (rdp->qlen_lazy != 0) {
1822 atomic_inc(&oom_callback_count);
1823 rsp->call(&rdp->oom_head, rcu_oom_callback);
1824 }
1825 }
1826}
1827
1828/*
1829 * If low on memory, ensure that each CPU has a non-lazy callback.
1830 * This will wake up CPUs that have only lazy callbacks, in turn
1831 * ensuring that they free up the corresponding memory in a timely manner.
1832 * Because an uncertain amount of memory will be freed in some uncertain
1833 * timeframe, we do not claim to have freed anything.
1834 */
1835static int rcu_oom_notify(struct notifier_block *self,
1836 unsigned long notused, void *nfreed)
1837{
1838 int cpu;
1839
1840 /* Wait for callbacks from earlier instance to complete. */
1841 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1842
1843 /*
1844 * Prevent premature wakeup: ensure that all increments happen
1845 * before there is a chance of the counter reaching zero.
1846 */
1847 atomic_set(&oom_callback_count, 1);
1848
1849 get_online_cpus();
1850 for_each_online_cpu(cpu) {
1851 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1852 cond_resched();
1853 }
1854 put_online_cpus();
1855
1856 /* Unconditionally decrement: no need to wake ourselves up. */
1857 atomic_dec(&oom_callback_count);
1858
1859 return NOTIFY_OK;
1860}
1861
1862static struct notifier_block rcu_oom_nb = {
1863 .notifier_call = rcu_oom_notify
1864};
1865
1866static int __init rcu_register_oom_notifier(void)
1867{
1868 register_oom_notifier(&rcu_oom_nb);
1869 return 0;
1870}
1871early_initcall(rcu_register_oom_notifier);
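/*
 * A standalone userspace sketch (not part of this file) of the
 * oom_callback_count protocol above, using C11 atomics and pthreads
 * (build with -pthread).  The initial count of 1 keeps the counter from
 * reaching zero while callbacks are still being posted; the final
 * unconditional decrement drops that bias.  Exactly one path observes
 * the transition to zero and delivers the wakeup (modeled as a print).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int callback_count;

static void *oom_callback(void *arg)
{
	(void)arg;
	if (atomic_fetch_sub(&callback_count, 1) == 1)	/* dec-and-test */
		printf("last callback: wake up the waiter\n");
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	atomic_store(&callback_count, 1);	/* bias against early zero */
	for (i = 0; i < 3; i++) {
		atomic_fetch_add(&callback_count, 1);
		pthread_create(&t[i], NULL, oom_callback, NULL);
	}
	if (atomic_fetch_sub(&callback_count, 1) == 1)	/* drop the bias */
		printf("last callback: wake up the waiter\n");
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}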
1872
8bd93a2c 1873#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
a858af28
PM
1874
1875#ifdef CONFIG_RCU_CPU_STALL_INFO
1876
1877#ifdef CONFIG_RCU_FAST_NO_HZ
1878
1879static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1880{
5955f7ee 1881 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
c0f4dfd4 1882 unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
a858af28 1883
c0f4dfd4
PM
1884 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1885 rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1886 ulong2long(nlpd),
1887 rdtp->all_lazy ? 'L' : '.',
1888 rdtp->tick_nohz_enabled_snap ? '.' : 'D');
a858af28
PM
1889}
1890
1891#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1892
1893static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1894{
1c17e4d4 1895 *cp = '\0';
a858af28
PM
1896}
1897
1898#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1899
1900/* Initiate the stall-info list. */
1901static void print_cpu_stall_info_begin(void)
1902{
efc151c3 1903 pr_cont("\n");
a858af28
PM
1904}
1905
1906/*
1907 * Print out diagnostic information for the specified stalled CPU.
1908 *
1909 * If the specified CPU is aware of the current RCU grace period
1910 * (flavor specified by rsp), then print the number of scheduling
1911 * clock interrupts the CPU has taken during the time that it has
1912 * been aware. Otherwise, print the number of RCU grace periods
1913 * that this CPU is ignorant of, for example, "1" if the CPU was
1914 * aware of the previous grace period.
1915 *
1916 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1917 */
1918static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1919{
1920 char fast_no_hz[72];
1921 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1922 struct rcu_dynticks *rdtp = rdp->dynticks;
1923 char *ticks_title;
1924 unsigned long ticks_value;
1925
1926 if (rsp->gpnum == rdp->gpnum) {
1927 ticks_title = "ticks this GP";
1928 ticks_value = rdp->ticks_this_gp;
1929 } else {
1930 ticks_title = "GPs behind";
1931 ticks_value = rsp->gpnum - rdp->gpnum;
1932 }
1933 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
efc151c3 1934 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
a858af28
PM
1935 cpu, ticks_value, ticks_title,
1936 atomic_read(&rdtp->dynticks) & 0xfff,
1937 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
6231069b 1938 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
a858af28
PM
1939 fast_no_hz);
1940}
1941
1942/* Terminate the stall-info list. */
1943static void print_cpu_stall_info_end(void)
1944{
efc151c3 1945 pr_err("\t");
a858af28
PM
1946}
1947
1948/* Zero ->ticks_this_gp for all flavors of RCU. */
1949static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1950{
1951 rdp->ticks_this_gp = 0;
6231069b 1952 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
a858af28
PM
1953}
1954
1955/* Increment ->ticks_this_gp for all flavors of RCU. */
1956static void increment_cpu_stall_ticks(void)
1957{
115f7a7c
PM
1958 struct rcu_state *rsp;
1959
1960 for_each_rcu_flavor(rsp)
1961 __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
a858af28
PM
1962}
1963
1964#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1965
1966static void print_cpu_stall_info_begin(void)
1967{
efc151c3 1968 pr_cont(" {");
a858af28
PM
1969}
1970
1971static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1972{
efc151c3 1973 pr_cont(" %d", cpu);
a858af28
PM
1974}
1975
1976static void print_cpu_stall_info_end(void)
1977{
efc151c3 1978 pr_cont("} ");
a858af28
PM
1979}
1980
1981static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1982{
1983}
1984
1985static void increment_cpu_stall_ticks(void)
1986{
1987}
1988
1989#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
3fbfbf7a
PM
1990
1991#ifdef CONFIG_RCU_NOCB_CPU
1992
1993/*
1994 * Offload callback processing from the boot-time-specified set of CPUs
1995 * specified by rcu_nocb_mask. For each CPU in the set, there is a
1996 * kthread created that pulls the callbacks from the corresponding CPU,
1997 * waits for a grace period to elapse, and invokes the callbacks.
1998 * The no-CBs CPUs do a wake_up() on their kthread when they insert
1999 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
2000 * has been specified, in which case each kthread actively polls its
2001 * CPU. (Which isn't so great for energy efficiency, but which does
2002 * reduce RCU's overhead on that CPU.)
2003 *
2004 * This is intended to be used in conjunction with Frederic Weisbecker's
2005 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
2006 * running CPU-bound user-mode computations.
2007 *
2008 * Offloading of callback processing could also in theory be used as
2009 * an energy-efficiency measure because CPUs with no RCU callbacks
2010 * queued are more aggressive about entering dyntick-idle mode.
2011 */
2012
2013
2014/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
2015static int __init rcu_nocb_setup(char *str)
2016{
2017 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2018 have_rcu_nocb_mask = true;
2019 cpulist_parse(str, rcu_nocb_mask);
2020 return 1;
2021}
2022__setup("rcu_nocbs=", rcu_nocb_setup);
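/*
 * For illustration, a hypothetical userspace model (not part of this
 * file) of the cpulist syntax consumed above: booting with
 * "rcu_nocbs=1-3,7" offloads callbacks from CPUs 1, 2, 3, and 7.  The
 * kernel's real parser is cpulist_parse(); this sketch handles only
 * plain comma-separated ranges.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_cpulist(const char *str, unsigned char *mask, int ncpus)
{
	char buf[128], *tok, *save, *dash;
	long lo, hi;

	strncpy(buf, str, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	for (tok = strtok_r(buf, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		dash = strchr(tok, '-');
		lo = strtol(tok, NULL, 10);
		hi = dash ? strtol(dash + 1, NULL, 10) : lo;
		for (; lo >= 0 && lo <= hi && lo < ncpus; lo++)
			mask[lo] = 1;
	}
}

int main(void)
{
	unsigned char mask[16] = { 0 };
	int cpu;

	parse_cpulist("1-3,7", mask, 16);
	for (cpu = 0; cpu < 16; cpu++)
		if (mask[cpu])
			printf("CPU %d: callbacks offloaded\n", cpu);
	return 0;
}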
2023
1b0048a4
PG
2024static int __init parse_rcu_nocb_poll(char *arg)
2025{
2026 rcu_nocb_poll = 1;
2027 return 0;
2028}
2029early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
2030
34ed6246 2031/*
dae6e64d
PM
2032 * Do any no-CBs CPUs need another grace period?
2033 *
2034 * Interrupts must be disabled. If the caller does not hold the root
 2035 * rcu_node structure's ->lock, the results are advisory only.
2036 */
2037static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2038{
2039 struct rcu_node *rnp = rcu_get_root(rsp);
2040
8b425aa8 2041 return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
dae6e64d
PM
2042}
2043
2044/*
0446be48
PM
2045 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2046 * grace period.
dae6e64d 2047 */
0446be48 2048static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
dae6e64d 2049{
0446be48 2050 wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
dae6e64d
PM
2051}
2052
2053/*
8b425aa8 2054 * Set the root rcu_node structure's ->need_future_gp field
dae6e64d
PM
2055 * based on the sum of those of all rcu_node structures. This does
2056 * double-count the root rcu_node structure's requests, but this
2057 * is necessary to handle the possibility of a rcu_nocb_kthread()
2058 * having awakened during the time that the rcu_node structures
2059 * were being updated for the end of the previous grace period.
34ed6246 2060 */
dae6e64d
PM
2061static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2062{
8b425aa8 2063 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
dae6e64d
PM
2064}
2065
2066static void rcu_init_one_nocb(struct rcu_node *rnp)
34ed6246 2067{
dae6e64d
PM
2068 init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2069 init_waitqueue_head(&rnp->nocb_gp_wq[1]);
34ed6246
PM
2070}
2071
3fbfbf7a 2072/* Is the specified CPU a no-CBs CPU? */
d1e43fa5 2073bool rcu_is_nocb_cpu(int cpu)
3fbfbf7a
PM
2074{
2075 if (have_rcu_nocb_mask)
2076 return cpumask_test_cpu(cpu, rcu_nocb_mask);
2077 return false;
2078}
2079
2080/*
2081 * Enqueue the specified string of rcu_head structures onto the specified
2082 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2083 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2084 * counts are supplied by rhcount and rhcount_lazy.
2085 *
 2086 * If warranted, also wake up the kthread servicing this CPU's queues.
2087 */
2088static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2089 struct rcu_head *rhp,
2090 struct rcu_head **rhtp,
2091 int rhcount, int rhcount_lazy)
2092{
2093 int len;
2094 struct rcu_head **old_rhpp;
2095 struct task_struct *t;
2096
2097 /* Enqueue the callback on the nocb list and update counts. */
2098 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2099 ACCESS_ONCE(*old_rhpp) = rhp;
2100 atomic_long_add(rhcount, &rdp->nocb_q_count);
2101 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2102
2103 /* If we are not being polled and there is a kthread, awaken it ... */
2104 t = ACCESS_ONCE(rdp->nocb_kthread);
 2105	if (rcu_nocb_poll || !t)
2106 return;
2107 len = atomic_long_read(&rdp->nocb_q_count);
2108 if (old_rhpp == &rdp->nocb_head) {
2109 wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2110 rdp->qlen_last_fqs_check = 0;
2111 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2112 wake_up_process(t); /* ... or if many callbacks queued. */
2113 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2114 }
2115 return;
2116}
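/*
 * A standalone userspace model (not part of this file) of the lockless
 * enqueue above, using C11 atomics.  As in the kernel code, a producer
 * first swings the tail pointer to its element's ->next with an atomic
 * exchange, then fills in the old tail slot -- so concurrent enqueuers
 * never lose elements, and a consumer may briefly see a NULL ->next that
 * is about to be filled in (which rcu_nocb_kthread() handles by waiting).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static struct node *head;			/* queued callbacks */
static _Atomic(struct node **) tail = &head;	/* like rdp->nocb_tail */

static void enqueue(struct node *n)
{
	struct node **old_tail;

	n->next = NULL;
	old_tail = atomic_exchange(&tail, &n->next);	/* like xchg() */
	*old_tail = n;			/* like ACCESS_ONCE(*old_rhpp) = rhp */
}

int main(void)
{
	struct node *p;
	int i;

	for (i = 0; i < 3; i++) {
		p = malloc(sizeof(*p));
		p->val = i;
		enqueue(p);
	}
	for (p = head; p; p = p->next)
		printf("callback %d\n", p->val);
	return 0;
}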
2117
2118/*
2119 * This is a helper for __call_rcu(), which invokes this when the normal
2120 * callback queue is inoperable. If this is not a no-CBs CPU, this
2121 * function returns failure back to __call_rcu(), which can complain
2122 * appropriately.
2123 *
2124 * Otherwise, this function queues the callback where the corresponding
2125 * "rcuo" kthread can find it.
2126 */
2127static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2128 bool lazy)
2129{
2130
d1e43fa5 2131 if (!rcu_is_nocb_cpu(rdp->cpu))
3fbfbf7a
PM
2132 return 0;
2133 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
21e7a608
PM
2134 if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2135 trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2136 (unsigned long)rhp->func,
2137 rdp->qlen_lazy, rdp->qlen);
2138 else
2139 trace_rcu_callback(rdp->rsp->name, rhp,
2140 rdp->qlen_lazy, rdp->qlen);
3fbfbf7a
PM
2141 return 1;
2142}
2143
2144/*
2145 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2146 * not a no-CBs CPU.
2147 */
2148static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2149 struct rcu_data *rdp)
2150{
2151 long ql = rsp->qlen;
2152 long qll = rsp->qlen_lazy;
2153
2154 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
d1e43fa5 2155 if (!rcu_is_nocb_cpu(smp_processor_id()))
3fbfbf7a
PM
2156 return 0;
2157 rsp->qlen = 0;
2158 rsp->qlen_lazy = 0;
2159
2160 /* First, enqueue the donelist, if any. This preserves CB ordering. */
2161 if (rsp->orphan_donelist != NULL) {
2162 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2163 rsp->orphan_donetail, ql, qll);
2164 ql = qll = 0;
2165 rsp->orphan_donelist = NULL;
2166 rsp->orphan_donetail = &rsp->orphan_donelist;
2167 }
2168 if (rsp->orphan_nxtlist != NULL) {
2169 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2170 rsp->orphan_nxttail, ql, qll);
2171 ql = qll = 0;
2172 rsp->orphan_nxtlist = NULL;
2173 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2174 }
2175 return 1;
2176}
2177
2178/*
34ed6246
PM
2179 * If necessary, kick off a new grace period, and either way wait
2180 * for a subsequent grace period to complete.
3fbfbf7a 2181 */
34ed6246 2182static void rcu_nocb_wait_gp(struct rcu_data *rdp)
3fbfbf7a 2183{
34ed6246 2184 unsigned long c;
dae6e64d 2185 bool d;
34ed6246 2186 unsigned long flags;
34ed6246
PM
2187 struct rcu_node *rnp = rdp->mynode;
2188
2189 raw_spin_lock_irqsave(&rnp->lock, flags);
0446be48
PM
2190 c = rcu_start_future_gp(rnp, rdp);
2191 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3fbfbf7a
PM
2192
2193 /*
34ed6246
PM
2194 * Wait for the grace period. Do so interruptibly to avoid messing
2195 * up the load average.
3fbfbf7a 2196 */
f7f7bac9 2197 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
34ed6246 2198 for (;;) {
dae6e64d
PM
2199 wait_event_interruptible(
2200 rnp->nocb_gp_wq[c & 0x1],
2201 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2202 if (likely(d))
34ed6246 2203 break;
dae6e64d 2204 flush_signals(current);
f7f7bac9 2205 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
34ed6246 2206 }
f7f7bac9 2207 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
34ed6246 2208 smp_mb(); /* Ensure that CB invocation happens after GP end. */
3fbfbf7a
PM
2209}
2210
2211/*
2212 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2213 * callbacks queued by the corresponding no-CBs CPU.
2214 */
2215static int rcu_nocb_kthread(void *arg)
2216{
2217 int c, cl;
2218 struct rcu_head *list;
2219 struct rcu_head *next;
2220 struct rcu_head **tail;
2221 struct rcu_data *rdp = arg;
2222
2223 /* Each pass through this loop invokes one batch of callbacks */
2224 for (;;) {
2225 /* If not polling, wait for next batch of callbacks. */
2226 if (!rcu_nocb_poll)
353af9c9 2227 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
3fbfbf7a
PM
2228 list = ACCESS_ONCE(rdp->nocb_head);
2229 if (!list) {
2230 schedule_timeout_interruptible(1);
353af9c9 2231 flush_signals(current);
3fbfbf7a
PM
2232 continue;
2233 }
2234
2235 /*
2236 * Extract queued callbacks, update counts, and wait
2237 * for a grace period to elapse.
2238 */
2239 ACCESS_ONCE(rdp->nocb_head) = NULL;
2240 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2241 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2242 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2243 ACCESS_ONCE(rdp->nocb_p_count) += c;
2244 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
34ed6246 2245 rcu_nocb_wait_gp(rdp);
3fbfbf7a
PM
2246
2247 /* Each pass through the following loop invokes a callback. */
2248 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2249 c = cl = 0;
2250 while (list) {
2251 next = list->next;
2252 /* Wait for enqueuing to complete, if needed. */
2253 while (next == NULL && &list->next != tail) {
2254 schedule_timeout_interruptible(1);
2255 next = list->next;
2256 }
2257 debug_rcu_head_unqueue(list);
2258 local_bh_disable();
2259 if (__rcu_reclaim(rdp->rsp->name, list))
2260 cl++;
2261 c++;
2262 local_bh_enable();
2263 list = next;
2264 }
2265 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2266 ACCESS_ONCE(rdp->nocb_p_count) -= c;
2267 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
c635a4e1 2268 rdp->n_nocbs_invoked += c;
3fbfbf7a
PM
2269 }
2270 return 0;
2271}
2272
2273/* Initialize per-rcu_data variables for no-CBs CPUs. */
2274static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2275{
2276 rdp->nocb_tail = &rdp->nocb_head;
2277 init_waitqueue_head(&rdp->nocb_wq);
2278}
2279
2280/* Create a kthread for each RCU flavor for each no-CBs CPU. */
2281static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2282{
2283 int cpu;
2284 struct rcu_data *rdp;
2285 struct task_struct *t;
2286
2287 if (rcu_nocb_mask == NULL)
2288 return;
2289 for_each_cpu(cpu, rcu_nocb_mask) {
2290 rdp = per_cpu_ptr(rsp->rda, cpu);
a4889858
PM
2291 t = kthread_run(rcu_nocb_kthread, rdp,
2292 "rcuo%c/%d", rsp->abbr, cpu);
3fbfbf7a
PM
2293 BUG_ON(IS_ERR(t));
2294 ACCESS_ONCE(rdp->nocb_kthread) = t;
2295 }
2296}
2297
2298/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
34ed6246 2299static bool init_nocb_callback_list(struct rcu_data *rdp)
3fbfbf7a
PM
2300{
2301 if (rcu_nocb_mask == NULL ||
2302 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
34ed6246 2303 return false;
3fbfbf7a 2304 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
34ed6246 2305 return true;
3fbfbf7a
PM
2306}
2307
34ed6246
PM
2308#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2309
dae6e64d
PM
2310static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2311{
2312 return 0;
3fbfbf7a
PM
2313}
2314
0446be48 2315static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
3fbfbf7a 2316{
3fbfbf7a
PM
2317}
2318
dae6e64d
PM
2319static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2320{
2321}
2322
2323static void rcu_init_one_nocb(struct rcu_node *rnp)
2324{
2325}
3fbfbf7a 2326
3fbfbf7a
PM
2327static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2328 bool lazy)
2329{
2330 return 0;
2331}
2332
2333static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2334 struct rcu_data *rdp)
2335{
2336 return 0;
2337}
2338
3fbfbf7a
PM
2339static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2340{
2341}
2342
2343static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2344{
2345}
2346
34ed6246 2347static bool init_nocb_callback_list(struct rcu_data *rdp)
3fbfbf7a 2348{
34ed6246 2349 return false;
3fbfbf7a
PM
2350}
2351
2352#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
65d798f0
PM
2353
2354/*
2355 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2356 * arbitrarily long period of time with the scheduling-clock tick turned
2357 * off. RCU will be paying attention to this CPU because it is in the
2358 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2359 * machine because the scheduling-clock tick has been disabled. Therefore,
2360 * if an adaptive-ticks CPU is failing to respond to the current grace
2361 * period and has not be idle from an RCU perspective, kick it.
2362 */
2363static void rcu_kick_nohz_cpu(int cpu)
2364{
2365#ifdef CONFIG_NO_HZ_FULL
2366 if (tick_nohz_full_cpu(cpu))
2367 smp_send_reschedule(cpu);
2368#endif /* #ifdef CONFIG_NO_HZ_FULL */
2369}
2333210b
PM
2370
2371
2372#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
2373
d4bd54fb
PM
2374/*
2375 * Define RCU flavor that holds sysidle state. This needs to be the
2376 * most active flavor of RCU.
2377 */
2378#ifdef CONFIG_PREEMPT_RCU
0edd1b17 2379static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
d4bd54fb 2380#else /* #ifdef CONFIG_PREEMPT_RCU */
0edd1b17 2381static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
d4bd54fb
PM
2382#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
2383
0edd1b17 2384static int full_sysidle_state; /* Current system-idle state. */
d4bd54fb
PM
2385#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */
2386#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
2387#define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */
2388#define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */
2389#define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */
2390
eb348b89
PM
2391/*
2392 * Invoked to note exit from irq or task transition to idle. Note that
2393 * usermode execution does -not- count as idle here! After all, we want
2394 * to detect full-system idle states, not RCU quiescent states and grace
2395 * periods. The caller must have disabled interrupts.
2396 */
2397static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
2398{
2399 unsigned long j;
2400
2401 /* Adjust nesting, check for fully idle. */
2402 if (irq) {
2403 rdtp->dynticks_idle_nesting--;
2404 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2405 if (rdtp->dynticks_idle_nesting != 0)
2406 return; /* Still not fully idle. */
2407 } else {
2408 if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
2409 DYNTICK_TASK_NEST_VALUE) {
2410 rdtp->dynticks_idle_nesting = 0;
2411 } else {
2412 rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
2413 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2414 return; /* Still not fully idle. */
2415 }
2416 }
2417
2418 /* Record start of fully idle period. */
2419 j = jiffies;
2420 ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
2421 smp_mb__before_atomic_inc();
2422 atomic_inc(&rdtp->dynticks_idle);
2423 smp_mb__after_atomic_inc();
2424 WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
2425}
2426
0edd1b17
PM
2427/*
2428 * Unconditionally force exit from full system-idle state. This is
2429 * invoked when a normal CPU exits idle, but must be called separately
2430 * for the timekeeping CPU (tick_do_timer_cpu). The reason for this
2431 * is that the timekeeping CPU is permitted to take scheduling-clock
2432 * interrupts while the system is in system-idle state, and of course
2433 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
2434 * interrupt from any other type of interrupt.
2435 */
2436void rcu_sysidle_force_exit(void)
2437{
2438 int oldstate = ACCESS_ONCE(full_sysidle_state);
2439 int newoldstate;
2440
2441 /*
2442 * Each pass through the following loop attempts to exit full
2443 * system-idle state. If contention proves to be a problem,
2444 * a trylock-based contention tree could be used here.
2445 */
2446 while (oldstate > RCU_SYSIDLE_SHORT) {
2447 newoldstate = cmpxchg(&full_sysidle_state,
2448 oldstate, RCU_SYSIDLE_NOT);
2449 if (oldstate == newoldstate &&
2450 oldstate == RCU_SYSIDLE_FULL_NOTED) {
2451 rcu_kick_nohz_cpu(tick_do_timer_cpu);
2452 return; /* We cleared it, done! */
2453 }
2454 oldstate = newoldstate;
2455 }
2456 smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
2457}
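/*
 * A standalone userspace sketch (not part of this file) of the cmpxchg()
 * loop above, using C11 atomics.  Conveniently, a failed C11
 * compare-exchange writes the currently observed value back into
 * "oldstate", which plays the role of the kernel's newoldstate reload;
 * this sketch simply returns on any successful exchange.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { SYSIDLE_NOT, SYSIDLE_SHORT, SYSIDLE_LONG,
       SYSIDLE_FULL, SYSIDLE_FULL_NOTED };

static atomic_int sysidle_state = SYSIDLE_FULL_NOTED;

static void sysidle_force_exit(void)
{
	int oldstate = atomic_load(&sysidle_state);

	while (oldstate > SYSIDLE_SHORT) {
		if (atomic_compare_exchange_strong(&sysidle_state,
						   &oldstate, SYSIDLE_NOT)) {
			if (oldstate == SYSIDLE_FULL_NOTED)
				printf("cleared noted sysidle state\n");
			return;		/* we cleared it, done */
		}
		/* oldstate was refreshed by the failed exchange; retry. */
	}
}

int main(void)
{
	sysidle_force_exit();
	printf("state is now %d\n", atomic_load(&sysidle_state));
	return 0;
}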
2458
eb348b89
PM
2459/*
2460 * Invoked to note entry to irq or task transition from idle. Note that
2461 * usermode execution does -not- count as idle here! The caller must
2462 * have disabled interrupts.
2463 */
2464static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
2465{
2466 /* Adjust nesting, check for already non-idle. */
2467 if (irq) {
2468 rdtp->dynticks_idle_nesting++;
2469 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2470 if (rdtp->dynticks_idle_nesting != 1)
2471 return; /* Already non-idle. */
2472 } else {
2473 /*
2474 * Allow for irq misnesting. Yes, it really is possible
2475 * to enter an irq handler then never leave it, and maybe
2476 * also vice versa. Handle both possibilities.
2477 */
2478 if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
2479 rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
2480 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2481 return; /* Already non-idle. */
2482 } else {
2483 rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
2484 }
2485 }
2486
2487 /* Record end of idle period. */
2488 smp_mb__before_atomic_inc();
2489 atomic_inc(&rdtp->dynticks_idle);
2490 smp_mb__after_atomic_inc();
2491 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
0edd1b17
PM
2492
2493 /*
2494 * If we are the timekeeping CPU, we are permitted to be non-idle
2495 * during a system-idle state. This must be the case, because
2496 * the timekeeping CPU has to take scheduling-clock interrupts
2497 * during the time that the system is transitioning to full
2498 * system-idle state. This means that the timekeeping CPU must
2499 * invoke rcu_sysidle_force_exit() directly if it does anything
2500 * more than take a scheduling-clock interrupt.
2501 */
2502 if (smp_processor_id() == tick_do_timer_cpu)
2503 return;
2504
2505 /* Update system-idle state: We are clearly no longer fully idle! */
2506 rcu_sysidle_force_exit();
2507}
2508
2509/*
2510 * Check to see if the current CPU is idle. Note that usermode execution
2511 * does not count as idle. The caller must have disabled interrupts.
2512 */
2513static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2514 unsigned long *maxj)
2515{
2516 int cur;
2517 unsigned long j;
2518 struct rcu_dynticks *rdtp = rdp->dynticks;
2519
2520 /*
2521 * If some other CPU has already reported non-idle, if this is
2522 * not the flavor of RCU that tracks sysidle state, or if this
2523 * is an offline or the timekeeping CPU, nothing to do.
2524 */
2525 if (!*isidle || rdp->rsp != rcu_sysidle_state ||
2526 cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
2527 return;
eb75767b
PM
2528 if (rcu_gp_in_progress(rdp->rsp))
2529 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
0edd1b17
PM
2530
2531 /* Pick up current idle and NMI-nesting counter and check. */
2532 cur = atomic_read(&rdtp->dynticks_idle);
2533 if (cur & 0x1) {
2534 *isidle = false; /* We are not idle! */
2535 return;
2536 }
2537 smp_mb(); /* Read counters before timestamps. */
2538
2539 /* Pick up timestamps. */
2540 j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
2541 /* If this CPU entered idle more recently, update maxj timestamp. */
2542 if (ULONG_CMP_LT(*maxj, j))
2543 *maxj = j;
2544}
2545
2546/*
2547 * Is this the flavor of RCU that is handling full-system idle?
2548 */
2549static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2550{
2551 return rsp == rcu_sysidle_state;
2552}
2553
eb75767b
PM
2554/*
2555 * Bind the grace-period kthread for the sysidle flavor of RCU to the
2556 * timekeeping CPU.
2557 */
2558static void rcu_bind_gp_kthread(void)
2559{
2560 int cpu = ACCESS_ONCE(tick_do_timer_cpu);
2561
2562 if (cpu < 0 || cpu >= nr_cpu_ids)
2563 return;
2564 if (raw_smp_processor_id() != cpu)
2565 set_cpus_allowed_ptr(current, cpumask_of(cpu));
2566}
2567
0edd1b17
PM
2568/*
2569 * Return a delay in jiffies based on the number of CPUs, rcu_node
2570 * leaf fanout, and jiffies tick rate. The idea is to allow larger
2571 * systems more time to transition to full-idle state in order to
 2572 * avoid the cache thrashing that would otherwise occur on the state variable.
2573 * Really small systems (less than a couple of tens of CPUs) should
2574 * instead use a single global atomically incremented counter, and later
2575 * versions of this will automatically reconfigure themselves accordingly.
2576 */
2577static unsigned long rcu_sysidle_delay(void)
2578{
2579 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2580 return 0;
2581 return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
2582}
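/*
 * A standalone userspace sketch (not part of this file) of the delay
 * formula above.  With, say, 4096 possible CPUs, HZ=1000, and a leaf
 * fanout of 16, each sysidle state transition must dwell for
 * DIV_ROUND_UP(4096 * 1000, 16 * 1000) = 256 jiffies before advancing.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nr_cpu_ids = 4096, hz = 1000, fanout_leaf = 16;

	printf("sysidle delay: %lu jiffies\n",
	       DIV_ROUND_UP(nr_cpu_ids * hz, fanout_leaf * 1000));
	return 0;
}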
2583
2584/*
2585 * Advance the full-system-idle state. This is invoked when all of
2586 * the non-timekeeping CPUs are idle.
2587 */
2588static void rcu_sysidle(unsigned long j)
2589{
2590 /* Check the current state. */
2591 switch (ACCESS_ONCE(full_sysidle_state)) {
2592 case RCU_SYSIDLE_NOT:
2593
2594 /* First time all are idle, so note a short idle period. */
2595 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
2596 break;
2597
2598 case RCU_SYSIDLE_SHORT:
2599
2600 /*
2601 * Idle for a bit, time to advance to next state?
2602 * cmpxchg failure means race with non-idle, let them win.
2603 */
2604 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2605 (void)cmpxchg(&full_sysidle_state,
2606 RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
2607 break;
2608
2609 case RCU_SYSIDLE_LONG:
2610
2611 /*
2612 * Do an additional check pass before advancing to full.
2613 * cmpxchg failure means race with non-idle, let them win.
2614 */
2615 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2616 (void)cmpxchg(&full_sysidle_state,
2617 RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
2618 break;
2619
2620 default:
2621 break;
2622 }
2623}
2624
2625/*
2626 * Found a non-idle non-timekeeping CPU, so kick the system-idle state
2627 * back to the beginning.
2628 */
2629static void rcu_sysidle_cancel(void)
2630{
2631 smp_mb();
2632 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
2633}
2634
2635/*
2636 * Update the sysidle state based on the results of a force-quiescent-state
2637 * scan of the CPUs' dyntick-idle state.
2638 */
2639static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
2640 unsigned long maxj, bool gpkt)
2641{
2642 if (rsp != rcu_sysidle_state)
2643 return; /* Wrong flavor, ignore. */
2644 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2645 return; /* Running state machine from timekeeping CPU. */
2646 if (isidle)
2647 rcu_sysidle(maxj); /* More idle! */
2648 else
2649 rcu_sysidle_cancel(); /* Idle is over. */
2650}
2651
2652/*
2653 * Wrapper for rcu_sysidle_report() when called from the grace-period
2654 * kthread's context.
2655 */
2656static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
2657 unsigned long maxj)
2658{
2659 rcu_sysidle_report(rsp, isidle, maxj, true);
2660}
2661
2662/* Callback and function for forcing an RCU grace period. */
2663struct rcu_sysidle_head {
2664 struct rcu_head rh;
2665 int inuse;
2666};
2667
2668static void rcu_sysidle_cb(struct rcu_head *rhp)
2669{
2670 struct rcu_sysidle_head *rshp;
2671
2672 /*
2673 * The following memory barrier is needed to replace the
2674 * memory barriers that would normally be in the memory
2675 * allocator.
2676 */
2677 smp_mb(); /* grace period precedes setting inuse. */
2678
2679 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
2680 ACCESS_ONCE(rshp->inuse) = 0;
2681}
2682
2683/*
2684 * Check to see if the system is fully idle, other than the timekeeping CPU.
2685 * The caller must have disabled interrupts.
2686 */
2687bool rcu_sys_is_idle(void)
2688{
2689 static struct rcu_sysidle_head rsh;
2690 int rss = ACCESS_ONCE(full_sysidle_state);
2691
2692 if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
2693 return false;
2694
2695 /* Handle small-system case by doing a full scan of CPUs. */
2696 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
2697 int oldrss = rss - 1;
2698
2699 /*
2700 * One pass to advance to each state up to _FULL.
2701 * Give up if any pass fails to advance the state.
2702 */
2703 while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
2704 int cpu;
2705 bool isidle = true;
2706 unsigned long maxj = jiffies - ULONG_MAX / 4;
2707 struct rcu_data *rdp;
2708
2709 /* Scan all the CPUs looking for nonidle CPUs. */
2710 for_each_possible_cpu(cpu) {
2711 rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
2712 rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
2713 if (!isidle)
2714 break;
2715 }
2716 rcu_sysidle_report(rcu_sysidle_state,
2717 isidle, maxj, false);
2718 oldrss = rss;
2719 rss = ACCESS_ONCE(full_sysidle_state);
2720 }
2721 }
2722
2723 /* If this is the first observation of an idle period, record it. */
2724 if (rss == RCU_SYSIDLE_FULL) {
2725 rss = cmpxchg(&full_sysidle_state,
2726 RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
2727 return rss == RCU_SYSIDLE_FULL;
2728 }
2729
2730 smp_mb(); /* ensure rss load happens before later caller actions. */
2731
2732 /* If already fully idle, tell the caller (in case of races). */
2733 if (rss == RCU_SYSIDLE_FULL_NOTED)
2734 return true;
2735
2736 /*
2737 * If we aren't there yet, and a grace period is not in flight,
2738 * initiate a grace period. Either way, tell the caller that
2739 * we are not there yet. We use an xchg() rather than an assignment
2740 * to make up for the memory barriers that would otherwise be
2741 * provided by the memory allocator.
2742 */
2743 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
2744 !rcu_gp_in_progress(rcu_sysidle_state) &&
2745 !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
2746 call_rcu(&rsh.rh, rcu_sysidle_cb);
2747 return false;
eb348b89
PM
2748}
2749
2333210b
PM
2750/*
2751 * Initialize dynticks sysidle state for CPUs coming online.
2752 */
2753static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
2754{
2755 rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
2756}
2757
2758#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
2759
eb348b89
PM
2760static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
2761{
2762}
2763
2764static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
2765{
2766}
2767
0edd1b17
PM
2768static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2769 unsigned long *maxj)
2770{
2771}
2772
2773static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2774{
2775 return false;
2776}
2777
eb75767b
PM
2778static void rcu_bind_gp_kthread(void)
2779{
2780}
2781
0edd1b17
PM
2782static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
2783 unsigned long maxj)
2784{
2785}
2786
2333210b
PM
2787static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
2788{
2789}
2790
2791#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */