/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/delay.h>
#include <linux/oom.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
        printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
        printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
               CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
        printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
        printk(KERN_INFO
               "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
        printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
        printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
        printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
        if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
                printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
        RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
        if (rdp->passed_quiesce == 0)
                trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
        rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.  Note that there is some uncertainty as
                 * to exactly when the current grace period started.
                 * We take a conservative approach, which can result
                 * in unnecessarily waiting on tasks that started very
                 * slightly after the current grace period began.  C'est
                 * la vie!
                 *
                 * But first, note that the current CPU must still be
                 * online!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
                        list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
                        rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
                        if (rnp->boost_tasks != NULL)
                                rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                } else {
                        list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
                                       ? rnp->gpnum
                                       : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special) {

                /*
                 * Complete exit from RCU read-side critical section on
                 * behalf of preempted instance of __rcu_read_unlock().
                 */
                rcu_read_unlock_special(t);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
        rcu_preempt_qs(cpu);
        local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
                                             struct rcu_node *rnp)
{
        struct list_head *np;

        np = t->rcu_node_entry.next;
        if (np == &rnp->blkd_tasks)
                np = NULL;
        return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        int empty_exp_now;
        unsigned long flags;
        struct list_head *np;
#ifdef CONFIG_RCU_BOOST
        struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;
        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                empty = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;
                trace_rcu_unlock_preempted_task("rcu_preempt",
                                                rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
                /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
                if (t->rcu_boost_mutex) {
                        rbmp = t->rcu_boost_mutex;
                        t->rcu_boost_mutex = NULL;
                }
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
                 * so we must take a snapshot of the expedited state.
                 */
                empty_exp_now = !rcu_preempted_readers_exp(rnp);
                if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report("preempt_rcu",
                                                         rnp->gpnum,
                                                         0, rnp->qsmask,
                                                         rnp->level,
                                                         rnp->grplo,
                                                         rnp->grphi,
                                                         !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
                } else {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                }

#ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
                if (rbmp)
                        rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && empty_exp_now)
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
        } else {
                local_irq_restore(flags);
        }
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_print_detail_task_stall_rnp(rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
        printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
        printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        rcu_print_task_stall_begin(rnp);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(KERN_CONT " P%d", t->pid);
                ndetected++;
        }
        rcu_print_task_stall_end();
        return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
        if (!list_empty(&rnp->blkd_tasks))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 * Returns true if there were tasks blocking the current RCU grace
 * period.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *t;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }

        /* If we are on an internal node, complain bitterly. */
        WARN_ON_ONCE(rnp != rdp->mynode);

        /*
         * Move tasks up to root rcu_node.  Don't try to get fancy for
         * this corner-case operation -- just put this node's tasks
         * at the head of the root node's list, and update the root node's
         * ->gp_tasks and ->exp_tasks pointers to those of this node's,
         * if non-NULL.  This might result in waiting for more tasks than
         * absolutely necessary, but this is a good performance/complexity
         * tradeoff.
         */
        if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        lp = &rnp->blkd_tasks;
        lp_root = &rnp_root->blkd_tasks;
        while (!list_empty(lp)) {
                t = list_entry(lp->next, typeof(*t), rcu_node_entry);
                raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
                list_del(&t->rcu_node_entry);
                t->rcu_blocked_node = rnp_root;
                list_add(&t->rcu_node_entry, lp_root);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp_root->gp_tasks = rnp->gp_tasks;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
        }

#ifdef CONFIG_RCU_BOOST
        /* In case root is being boosted and leaf is not. */
        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
        if (rnp_root->boost_tasks != NULL &&
            rnp_root->boost_tasks != rnp_root->gp_tasks)
                rnp_root->boost_tasks = rnp_root->gp_tasks;
        raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

        rnp->gp_tasks = NULL;
        rnp->exp_tasks = NULL;
        return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs(cpu);
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
            per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
        rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
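
/*
 * Illustrative sketch of typical call_rcu() usage (editor's example, not
 * part of this file; "struct foo", foo_reclaim(), and foo_remove() are
 * hypothetical).  The updater unlinks an element, then lets the callback
 * free it after a grace period, so concurrent readers under
 * rcu_read_lock() never see freed memory.
 */
#if 0   /* Example only -- not built. */
struct foo {
        struct list_head list;
        int data;
        struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
        struct foo *fp = container_of(head, struct foo, rcu);

        kfree(fp);      /* Runs only after a full grace period. */
}

static void foo_remove(struct foo *fp)
{
        list_del_rcu(&fp->list);        /* New readers can no longer find it. */
        call_rcu(&fp->rcu, foo_reclaim);        /* Free once old readers finish. */
}
#endif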

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
                    void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
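
/*
 * Illustrative sketch (editor's example, not part of this file): callers do
 * not invoke kfree_call_rcu() directly.  They use the kfree_rcu() wrapper,
 * which encodes the rcu_head offset and reaches this function via
 * __kfree_rcu().  "struct foo" and foo_remove() are hypothetical.
 */
#if 0   /* Example only -- not built. */
struct foo {
        struct list_head list;
        struct rcu_head rcu;
};

static void foo_remove(struct foo *fp)
{
        list_del_rcu(&fp->list);
        kfree_rcu(fp, rcu);     /* Lazy: only frees memory after a grace period. */
}
#endif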

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
        wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
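
/*
 * Illustrative sketch of the classic synchronize_rcu() update pattern
 * (editor's example, not part of this file; gp, gp_lock, "struct foo",
 * and the read/update functions are hypothetical).  The updater publishes
 * a new version, waits for a grace period, then frees the old version
 * that pre-existing readers might still be referencing.
 */
#if 0   /* Example only -- not built. */
struct foo {
        int a;
};
static struct foo __rcu *gp;
static DEFINE_SPINLOCK(gp_lock);

static int foo_read_a(void)
{
        int ret = 0;
        struct foo *p;

        rcu_read_lock();
        p = rcu_dereference(gp);
        if (p)
                ret = p->a;
        rcu_read_unlock();
        return ret;
}

static void foo_update_a(int new_a)
{
        struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
        struct foo *oldp;

        if (!newp)
                return;
        newp->a = new_a;
        spin_lock(&gp_lock);
        oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
        rcu_assign_pointer(gp, newp);
        spin_unlock(&gp_lock);
        synchronize_rcu();      /* Wait for pre-existing readers to finish. */
        kfree(oldp);
}
#endif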

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        if (wake)
                                wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        int must_wait = 0;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (list_empty(&rnp->blkd_tasks)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
                rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
                must_wait = 1;
        }
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10) {
                        udelay(trycount * num_online_cpus());
                } else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret; /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret; /* Others did our work for us. */

        /* force all RCU readers onto ->blkd_tasks lists. */
        synchronize_sched_expedited();

        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blkd_tasks lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
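
/*
 * Illustrative sketch of the restructuring suggested above (editor's
 * example, not part of this file; "struct foo", its list/pending fields,
 * and both remove functions are hypothetical).  Instead of paying one
 * disruptive expedited grace period per element, unlink everything first
 * and wait for a single grace period.
 */
#if 0   /* Example only -- not built. */
struct foo {
        struct list_head list;          /* Linkage in the RCU-protected list. */
        struct list_head pending;       /* Private linkage for deferred freeing. */
};

static void remove_all_slow(struct list_head *head)    /* Anti-pattern. */
{
        struct foo *fp, *tmp;

        list_for_each_entry_safe(fp, tmp, head, list) {
                list_del_rcu(&fp->list);
                synchronize_rcu_expedited();    /* One expensive GP per element. */
                kfree(fp);
        }
}

static void remove_all_batched(struct list_head *head)
{
        struct foo *fp, *tmp;
        LIST_HEAD(tofree);

        list_for_each_entry_safe(fp, tmp, head, list) {
                list_del_rcu(&fp->list);
                list_add(&fp->pending, &tofree);
        }
        synchronize_rcu();              /* One grace period covers them all. */
        list_for_each_entry_safe(fp, tmp, &tofree, pending)
                kfree(fp);
}
#endif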

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
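
/*
 * Illustrative sketch of why a module that posts call_rcu() callbacks
 * needs rcu_barrier() on unload (editor's example, not part of this file;
 * foo_exit(), foo_remove_all(), and foo_cache are hypothetical).  Pending
 * callbacks may still reference module code and data, so wait for all of
 * them to be invoked before tearing anything down.
 */
#if 0   /* Example only -- not built. */
static void __exit foo_exit(void)
{
        foo_remove_all();       /* Stop posting new callbacks, unlink elements. */
        rcu_barrier();          /* Wait for all queued callbacks to run. */
        kmem_cache_destroy(foo_cache);
}
module_exit(foo_exit);
#endif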

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
        rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
                    void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
        if (list_empty(&rnp->blkd_tasks))
                rnp->n_balk_blkd_tasks++;
        else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
                rnp->n_balk_exp_gp_tasks++;
        else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
                rnp->n_balk_boost_tasks++;
        else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
                rnp->n_balk_notblocked++;
        else if (rnp->gp_tasks != NULL &&
                 ULONG_CMP_LT(jiffies, rnp->boost_time))
                rnp->n_balk_notyet++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
        unsigned long flags;
        struct rt_mutex mtx;
        struct task_struct *t;
        struct list_head *tb;

        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
                return 0;  /* Nothing left to boost. */

        raw_spin_lock_irqsave(&rnp->lock, flags);

        /*
         * Recheck under the lock: all tasks in need of boosting
         * might exit their RCU read-side critical sections on their own.
         */
        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return 0;
        }

        /*
         * Preferentially boost tasks blocking expedited grace periods.
         * This cannot starve the normal grace periods because a second
         * expedited grace period must boost all blocked tasks, including
         * those blocking the pre-existing normal grace period.
         */
        if (rnp->exp_tasks != NULL) {
                tb = rnp->exp_tasks;
                rnp->n_exp_boosts++;
        } else {
                tb = rnp->boost_tasks;
                rnp->n_normal_boosts++;
        }
        rnp->n_tasks_boosted++;

        /*
         * We boost task t by manufacturing an rt_mutex that appears to
         * be held by task t.  We leave a pointer to that rt_mutex where
         * task t can find it, and task t will release the mutex when it
         * exits its outermost RCU read-side critical section.  Then
         * simply acquiring this artificial rt_mutex will boost task
         * t's priority.  (Thanks to tglx for suggesting this approach!)
         *
         * Note that task t must acquire rnp->lock to remove itself from
         * the ->blkd_tasks list, which it will do from exit() if from
         * nowhere else.  We therefore are guaranteed that task t will
         * stay around at least until we drop rnp->lock.  Note that
         * rnp->lock also resolves races between our priority boosting
         * and task t's exiting its outermost RCU read-side critical
         * section.
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

        return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
               ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
        invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
        struct rcu_node *rnp = (struct rcu_node *)arg;
        int spincnt = 0;
        int more2boost;

        trace_rcu_utilization("Start boost kthread@init");
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
                trace_rcu_utilization("End boost kthread@rcu_wait");
                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
                trace_rcu_utilization("Start boost kthread@rcu_wait");
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
                        spincnt++;
                else
                        spincnt = 0;
                if (spincnt > 10) {
                        trace_rcu_utilization("End boost kthread@rcu_yield");
                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
                        trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        /* NOTREACHED */
        trace_rcu_utilization("End boost kthread@notreached");
        return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
                rnp->n_balk_exp_gp_tasks++;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        if (rnp->exp_tasks != NULL ||
            (rnp->gp_tasks != NULL &&
             rnp->boost_tasks == NULL &&
             rnp->qsmask == 0 &&
             ULONG_CMP_GE(jiffies, rnp->boost_time))) {
                if (rnp->exp_tasks == NULL)
                        rnp->boost_tasks = rnp->gp_tasks;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                t = rnp->boost_kthread_task;
                if (t != NULL)
                        wake_up_process(t);
        } else {
                rcu_initiate_boost_trace(rnp);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
        if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
            current != __this_cpu_read(rcu_cpu_kthread_task))
                wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
        return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm)
{
        struct task_struct *t;

        t = rnp->boost_kthread_task;
        if (t != NULL)
                set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
        rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp,
                                                 int rnp_index)
{
        unsigned long flags;
        struct sched_param sp;
        struct task_struct *t;

        if (&rcu_preempt_state != rsp)
                return 0;
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
        struct task_struct *t;

        /* Stop the CPU's kthread. */
        t = per_cpu(rcu_cpu_kthread_task, cpu);
        if (t != NULL) {
                per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
                kthread_stop(t);
        }
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
        rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
        rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
        rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
        struct task_struct *t;

        t = rnp->node_kthread_task;
        if (t != NULL)
                wake_up_process(t);
}

/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
        int policy;
        struct sched_param sp;
        struct task_struct *t;

        t = per_cpu(rcu_cpu_kthread_task, cpu);
        if (t == NULL)
                return;
        if (to_rt) {
                policy = SCHED_FIFO;
                sp.sched_priority = RCU_KTHREAD_PRIO;
        } else {
                policy = SCHED_NORMAL;
                sp.sched_priority = 0;
        }
        sched_setscheduler_nocheck(t, policy, &sp);
}

/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
        struct rcu_node *rnp = rdp->mynode;

        atomic_or(rdp->grpmask, &rnp->wakemask);
        invoke_rcu_node_kthread(rnp);
}

/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
        struct sched_param sp;
        struct timer_list yield_timer;
        int prio = current->rt_priority;

        setup_timer_on_stack(&yield_timer, f, arg);
        mod_timer(&yield_timer, jiffies + 2);
        sp.sched_priority = 0;
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
        set_user_nice(current, 19);
        schedule();
        set_user_nice(current, 0);
        sp.sched_priority = prio;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
        del_timer(&yield_timer);
}

/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
        while (cpu_is_offline(cpu) ||
               !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
               smp_processor_id() != cpu) {
                if (kthread_should_stop())
                        return 1;
                per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
                per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
                local_bh_enable();
                schedule_timeout_uninterruptible(1);
                if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
                        set_cpus_allowed_ptr(current, cpumask_of(cpu));
                local_bh_disable();
        }
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
        int cpu = (int)(long)arg;
        unsigned long flags;
        int spincnt = 0;
        unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);

        trace_rcu_utilization("Start CPU kthread@init");
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
                trace_rcu_utilization("End CPU kthread@rcu_wait");
                rcu_wait(*workp != 0 || kthread_should_stop());
                trace_rcu_utilization("Start CPU kthread@rcu_wait");
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
                        break;
                }
                *statusp = RCU_KTHREAD_RUNNING;
                per_cpu(rcu_cpu_kthread_loops, cpu)++;
                local_irq_save(flags);
                work = *workp;
                *workp = 0;
                local_irq_restore(flags);
                if (work)
                        rcu_kthread_do_work();
                local_bh_enable();
                if (*workp != 0)
                        spincnt++;
                else
                        spincnt = 0;
                if (spincnt > 10) {
                        *statusp = RCU_KTHREAD_YIELDING;
                        trace_rcu_utilization("End CPU kthread@rcu_yield");
                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
                        trace_rcu_utilization("Start CPU kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        *statusp = RCU_KTHREAD_STOPPED;
        trace_rcu_utilization("End CPU kthread@term");
        return 0;
}

/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
        struct sched_param sp;
        struct task_struct *t;

        if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
        t = kthread_create_on_node(rcu_cpu_kthread,
                                   (void *)(long)cpu,
                                   cpu_to_node(cpu),
                                   "rcuc/%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        if (cpu_online(cpu))
                kthread_bind(t, cpu);
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        per_cpu(rcu_cpu_kthread_task, cpu) = t;
        wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
        return 0;
}

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
        int cpu;
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp = (struct rcu_node *)arg;
        struct sched_param sp;
        struct task_struct *t;

        for (;;) {
                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
                rcu_wait(atomic_read(&rnp->wakemask) != 0);
                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                mask = atomic_xchg(&rnp->wakemask, 0);
                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
                        if ((mask & 0x1) == 0)
                                continue;
                        preempt_disable();
                        t = per_cpu(rcu_cpu_kthread_task, cpu);
                        if (!cpu_online(cpu) || t == NULL) {
                                preempt_enable();
                                continue;
                        }
                        per_cpu(rcu_cpu_has_work, cpu) = 1;
                        sp.sched_priority = RCU_KTHREAD_PRIO;
                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                        preempt_enable();
                }
        }
        /* NOTREACHED */
        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
        return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
        cpumask_var_t cm;
        int cpu;
        unsigned long mask = rnp->qsmaskinit;

        if (rnp->node_kthread_task == NULL)
                return;
        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
                return;
        cpumask_clear(cm);
        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
        if (cpumask_weight(cm) == 0) {
                cpumask_setall(cm);
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
                        cpumask_clear_cpu(cpu, cm);
                WARN_ON_ONCE(cpumask_weight(cm) == 0);
        }
        set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
        rcu_boost_kthread_setaffinity(rnp, cm);
        free_cpumask_var(cm);
}

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                                                struct rcu_node *rnp)
{
        unsigned long flags;
        int rnp_index = rnp - &rsp->node[0];
        struct sched_param sp;
        struct task_struct *t;

        if (!rcu_scheduler_fully_active ||
            rnp->qsmaskinit == 0)
                return 0;
        if (rnp->node_kthread_task == NULL) {
                t = kthread_create(rcu_node_kthread, (void *)rnp,
                                   "rcun/%d", rnp_index);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rnp->node_kthread_task = t;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                sp.sched_priority = 99;
                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        }
        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
        int cpu;
        struct rcu_node *rnp;

        rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu) {
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                if (cpu_online(cpu))
                        (void)rcu_spawn_one_cpu_kthread(cpu);
        }
        rnp = rcu_get_root(rcu_state);
        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
        if (NUM_RCU_NODES > 1) {
                rcu_for_each_leaf_node(rcu_state, rnp)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
        }
        return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;

        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
        if (rcu_scheduler_fully_active) {
                (void)rcu_spawn_one_cpu_kthread(cpu);
                if (rnp->node_kthread_task == NULL)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
        }
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
}

static bool rcu_is_callbacks_kthread(void)
{
        return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
        rcu_scheduler_fully_active = 1;
        return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
        *delta_jiffies = ULONG_MAX;
        return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following three preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *      to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *      scheduling-clock interrupt than to loop through the state machine
 *      at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *      optional if RCU does not need anything immediately from this
 *      CPU, even if this CPU still has RCU callbacks queued.  The first
 *      times through the state machine are mandatory: we need to give
 *      the state machine a chance to communicate a quiescent state
 *      to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *      to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *      is sized to be roughly one RCU grace period.  Those energy-efficiency
 *      benchmarkers who might otherwise be tempted to set this to a large
 *      number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *      system.  And if you are -that- concerned about energy efficiency,
 *      just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *      permitted to sleep in dyntick-idle mode with only lazy RCU
 *      callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5              /* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3          /* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4             /* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */

extern int tick_nohz_enabled;

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
        return rdp->qlen != rdp->qlen_lazy;
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
        return 0;
}

#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
        return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
               __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}


/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 *
 * The delta_jiffies argument is used to store the time when RCU is
 * going to need the CPU again if it still has callbacks.  The reason
 * for this is that rcu_prepare_for_idle() might need to post a timer,
 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
 * the wakeup time for this CPU.  This means that RCU's timer can be
 * delayed until the wakeup time, which defeats the purpose of posting
 * a timer.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Flag a new idle sojourn to the idle-entry state machine. */
	rdtp->idle_first_pass = 1;
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu)) {
		*delta_jiffies = ULONG_MAX;
		return 0;
	}
	if (rdtp->dyntick_holdoff == jiffies) {
		/* RCU recently tried and failed, so don't try again. */
		*delta_jiffies = 1;
		return 1;
	}
	/* Set up for the possibility that RCU will post a timer. */
	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
					  RCU_IDLE_GP_DELAY) - jiffies;
	} else {
		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
	}
	return 0;
}
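
/*
 * Usage sketch (editor's illustration, not part of the original code):
 * the nohz idle-entry path is assumed to consult rcu_needs_cpu() along
 * these lines before stopping the scheduling-clock tick, using
 * delta_jiffies to bound how long the tick may stay stopped:
 *
 *	unsigned long rcu_delta;
 *
 *	if (rcu_needs_cpu(cpu, &rcu_delta))
 *		... keep the tick: RCU needs this CPU right away ...
 *	else
 *		... the tick may stop, but for at most rcu_delta jiffies ...
 */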

/*
 * Handler for smp_call_function_single().  The only point of this
 * handler is to wake the CPU up, so the handler does only tracing.
 */
void rcu_idle_demigrate(void *unused)
{
	trace_rcu_prep_idle("Demigrate");
}

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 *
 * One special case: the timer gets migrated without awakening the CPU
 * on which the timer was scheduled.  In this case, we must wake up
 * that CPU.  We do so with smp_call_function_single().
 */
static void rcu_idle_gp_timer_func(unsigned long cpu_in)
{
	int cpu = (int)cpu_in;

	trace_rcu_prep_idle("Timer");
	if (cpu != smp_processor_id())
		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
	else
		WARN_ON_ONCE(1); /* Getting here can hang the system... */
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	rdtp->dyntick_holdoff = jiffies - 1;
	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
	rdtp->idle_gp_timer_expires = jiffies - 1;
	rdtp->idle_first_pass = 1;
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to ->idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	del_timer(&rdtp->idle_gp_timer);
	trace_rcu_prep_idle("Cleanup after idle");
	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then use
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The ->dyntick_drain field controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	struct timer_list *tp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_enabled);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If this is an idle re-entry, for example, due to use of
	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
	 * loop, then don't take any state-machine actions, unless the
	 * momentary exit from idle queued additional non-lazy callbacks.
	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
	 * pending.
	 */
	if (!rdtp->idle_first_pass &&
	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
		if (rcu_cpu_has_callbacks(cpu)) {
			tp = &rdtp->idle_gp_timer;
			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		}
		return;
	}
	rdtp->idle_first_pass = 0;
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		rdtp->dyntick_drain = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (rdtp->dyntick_holdoff == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the ->dyntick_drain sequencing. */
	if (rdtp->dyntick_drain <= 0) {
		/* First time through, initialize the counter. */
		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		rdtp->dyntick_drain = 0;
		rdtp->dyntick_holdoff = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("Dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("Dyntick with lazy callbacks");
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		return; /* Nothing more to do immediately. */
	} else if (--(rdtp->dyntick_drain) <= 0) {
		/* We have hit the limit, so time to give up. */
		rdtp->dyntick_holdoff = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		trace_rcu_prep_idle("Callbacks drained");
	}
}
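
/*
 * Editor's summary of the ->dyntick_drain sequencing above (illustration,
 * not part of the original code):
 *
 *	dyntick_drain <= 0:	first pass; arm the counter to RCU_IDLE_FLUSHES.
 *	dyntick_drain <= RCU_IDLE_OPT_FLUSHES, nothing urgent pending:
 *				stop draining, post ->idle_gp_timer, and let the
 *				CPU go dyntick-idle with callbacks still queued.
 *	--dyntick_drain <= 0:	flush attempts exhausted; enter holdoff for the
 *				current jiffy and force a softirq instead.
 *	otherwise:		push callbacks through one more
 *				force_quiescent_state() pass and retry on the
 *				next idle entry.
 */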

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = __this_cpu_ptr(rsp->rda);
		if (rdp->qlen_lazy != 0) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn
 * ensuring that they free up the corresponding memory in a timely manner.
 * Because an uncertain amount of memory will be freed in some uncertain
 * timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
		cond_resched();
	}
	put_online_cpus();

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return NOTIFY_OK;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct timer_list *tltp = &rdtp->idle_gp_timer;

	sprintf(cp, "drain=%d %c timer=%lu",
		rdtp->dyntick_drain,
		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
		timer_pending(tltp) ? tltp->expires - jiffies : -1);
}
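
/*
 * Example of the string built above (editor's illustration, with made-up
 * values): "drain=3 H timer=42" means three dyntick-idle flush attempts
 * remain, the CPU is in holdoff for the current jiffy ('H'; '.' otherwise),
 * and ->idle_gp_timer fires in 42 jiffies.  If the timer is not pending,
 * the -1 is printed through %lu and so shows up as ULONG_MAX.
 */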

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
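
/*
 * Example stall-info line (editor's illustration, with made-up values):
 *
 *	2: (3 ticks this GP) idle=4c5/1/0 drain=0 . timer=512
 *
 * CPU 2 took 3 scheduling-clock interrupts while aware of the current grace
 * period; the low 12 bits of its ->dynticks counter are 0x4c5 (odd, so the
 * CPU is not in dyntick-idle); ->dynticks_nesting is 1 and
 * ->dynticks_nmi_nesting is 0; the trailing fields are the
 * CONFIG_RCU_FAST_NO_HZ string built by print_cpu_stall_fast_no_hz().
 */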

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */