/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/delay.h>
#include <linux/stop_machine.h>
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO
	       "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}
/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * online!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}
/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}
/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_qs(smp_processor_id());

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
		if (t->rcu_boosted) {
			special |= RCU_READ_UNLOCK_BOOSTED;
			t->rcu_boosted = 0;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (special & RCU_READ_UNLOCK_BOOSTED) {
			rt_mutex_unlock(t->rcu_boost_mutex);
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}
/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
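/*
 * Illustrative reader-side usage sketch (not part of this file's
 * implementation): under CONFIG_TREE_PREEMPT_RCU the rcu_read_lock()
 * and rcu_read_unlock() wrappers end up in __rcu_read_lock() and
 * __rcu_read_unlock() above.  The "struct foo" and "gbl_foo" names
 * below are hypothetical, and gbl_foo is assumed to have been
 * published with rcu_assign_pointer(); the sketch only shows a minimal
 * read-side critical section that may be preempted and later cleaned
 * up by rcu_read_unlock_special():
 *
 *	struct foo {
 *		int a;
 *	};
 *	static struct foo __rcu *gbl_foo;
 *
 *	static int read_foo_a(void)
 *	{
 *		int ret;
 *
 *		rcu_read_lock();
 *		ret = rcu_dereference(gbl_foo)->a;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */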
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}
#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		printk(" P%d", t->pid);
}
/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}
/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 * Returns true if there were tasks blocking the current RCU grace
 * period.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}
#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
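/*
 * Illustrative update-side usage sketch (not part of this file's
 * implementation), reusing the hypothetical "struct foo"/"gbl_foo"
 * names from the reader sketch above, here extended with an rcu_head;
 * error handling is omitted.  An updater swaps in a new version and
 * defers freeing of the old one asynchronously via call_rcu():
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	static void update_foo_a(int new_a)
 *	{
 *		struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		struct foo *old_fp;
 *
 *		new_fp->a = new_a;
 *		old_fp = rcu_dereference_protected(gbl_foo, 1);
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 *
 * A synchronous updater would instead invoke synchronize_rcu() after
 * the rcu_assign_pointer() and then kfree(old_fp) directly.
 */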
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}
/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}
/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}
/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}
/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}
/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}
/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}
/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}
/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"
#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	t->rcu_boosted = 1;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
}
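/*
 * The proxy-lock handshake performed above, in isolation (illustrative
 * sketch only; "reader" stands for the task selected via ->exp_tasks or
 * ->boost_tasks):
 *
 *	1. Booster: rt_mutex_init_proxy_locked(&mtx, reader); the
 *	   on-stack mtx now appears to be held by the reader.
 *	2. Booster: reader->rcu_boost_mutex = &mtx;
 *	3. Booster: rt_mutex_lock(&mtx); blocks and, via priority
 *	   inheritance, raises the reader's priority to the booster's.
 *	4. Reader:  rt_mutex_unlock() of ->rcu_boost_mutex in
 *	   rcu_read_unlock_special() releases mtx, which both deboosts
 *	   the reader and lets the booster's rt_mutex_lock() return.
 *	5. Booster: rt_mutex_unlock(&mtx); keeps lockdep happy.
 *
 * So the reader runs at elevated priority only until it leaves its
 * outermost RCU read-side critical section.
 */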
/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}
/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}
/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
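/*
 * Worked example of the RCU_BOOST_DELAY_JIFFIES arithmetic above
 * (illustrative only, using CONFIG_RCU_BOOST_DELAY=500 as the example
 * value): with HZ=1000 this is DIV_ROUND_UP(500 * 1000, 1000) = 500
 * jiffies, and with HZ=250 it is DIV_ROUND_UP(500 * 250, 1000) = 125
 * jiffies -- in both cases boosting is not initiated until the grace
 * period is at least 500 ms old.
 */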
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}
/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
	struct task_struct *t;

	t = rnp->node_kthread_task;
	if (t != NULL)
		wake_up_process(t);
}
/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
	int policy;
	struct sched_param sp;
	struct task_struct *t;

	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t == NULL)
		return;
	if (to_rt) {
		policy = SCHED_FIFO;
		sp.sched_priority = RCU_KTHREAD_PRIO;
	} else {
		policy = SCHED_NORMAL;
		sp.sched_priority = 0;
	}
	sched_setscheduler_nocheck(t, policy, &sp);
}
/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}
/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}
/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}
/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}
/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}
/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}
#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */
static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}
/*
 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
 * approach to force grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period.  We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We subtract
		 * 1 to get the same token that the last incrementer got.
		 * We retry after they started, so our grace period works
		 * for them, and they started after our first try, so their
		 * grace period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started) - 1;
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
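/*
 * Worked example of the ticket-style counters above (illustrative only).
 * Suppose _started == _done == 0.  Task A enters and gets
 * firstsnap = snap = 1; task B then enters and gets firstsnap = snap = 2.
 * If B's try_stop_cpus() succeeds first, B executes
 * atomic_cmpxchg(&sync_sched_expedited_done, 0, 2), so _done becomes 2.
 * When A's try_stop_cpus() then fails, A observes _done == 2 >= its
 * firstsnap of 1 and returns immediately: B incremented _started after
 * A did, so B's successful stop of all CPUs also covers the grace
 * period that A needed.
 */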
#endif /* #else #ifndef CONFIG_SMP */
#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
 * entry is not configured, so we never do need to.
 */
static void rcu_needs_cpu_flush(void)
{
}
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do a
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
						     thatcpu).dynticks);
		smp_mb(); /* Order sampling of snap with end of grace period. */
		if ((snap & 0x1) != 0) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step pushing remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		invoke_rcu_core();
	return c;
}
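/*
 * Worked example of the sequencing above (illustrative only): assume this
 * CPU is the last one out of dyntick-idle mode and still has callbacks
 * queued.  The first call to rcu_needs_cpu() sets rcu_dyntick_drain to
 * RCU_NEEDS_CPU_FLUSHES (5) and does one force_quiescent_state() push;
 * each subsequent call decrements the counter and pushes again.  If
 * callbacks still remain after five such passes, rcu_dyntick_holdoff is
 * set to the current jiffies value, and further calls during that jiffy
 * fall back to rcu_needs_cpu_quick_check() instead of retrying.
 */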
/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
		return;
	local_irq_save(flags);
	(void)rcu_needs_cpu(cpu);
	local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */