/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.lock	 = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.lock	 = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
static int blimit = 10;		/* Maximum callbacks per softirq invocation. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
	spin_lock_irqsave(&rcp->lock, flags);
	if (unlikely(!rcp->signaled)) {
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 *
		 * cpu_online_map is updated by the _cpu_down()
		 * using __stop_machine(). Since we're in an irqs-disabled
		 * section, __stop_machine() is not executing, hence
		 * the cpu_online_map is stable.
		 *
		 * However, a cpu might have been offlined _just_ before
		 * we disabled irqs while entering here.
		 * And the rcu subsystem might not yet have handled the CPU_DEAD
		 * notification, leading to the offlined cpu's bit
		 * being set in the rcp->cpumask.
		 *
		 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
		 * sending smp_send_reschedule() to an offlined CPU.
		 */
		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask_nr(cpu, cpumask)
			smp_send_reschedule(cpu);
	spin_unlock_irqrestore(&rcp->lock, flags);
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)

static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
				struct rcu_data *rdp)
	smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
	/*
	 * Determine the batch number of this callback.
	 *
	 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
	 * the local variable "batch" and emits code like this:
	 *   1) rdp->batch = rcp->cur + 1			# gets old value
	 *   2) rcu_batch_after(rcp->cur + 1, rdp->batch)	# gets new value
	 * then [*nxttail[0], *nxttail[1]) may contain callbacks
	 * whose batch# = rdp->batch; see the comment of struct rcu_data.
	 */
	batch = ACCESS_ONCE(rcp->cur) + 1;

	if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
		/* process callbacks */
		rdp->nxttail[0] = rdp->nxttail[1];
		rdp->nxttail[1] = rdp->nxttail[2];
		if (rcu_batch_after(batch - 1, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[2];

	*rdp->nxttail[2] = head;
	rdp->nxttail[2] = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
	local_irq_save(flags);
	__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
	local_irq_restore(flags);

EXPORT_SYMBOL_GPL(call_rcu);
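/*
 * Example of the usual call_rcu() idiom (illustrative only; "struct foo",
 * foo_reclaim() and "old_fp" are hypothetical names, not part of this file):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * The updater unlinks the old element, publishes its replacement with
 * rcu_assign_pointer(), and then queues the old element:
 *
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 *
 * foo_reclaim() runs from rcu_do_batch() once a grace period has elapsed,
 * i.e. once every reader that might still see old_fp has left its
 * rcu_read_lock()/rcu_read_unlock() section.
 */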
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context.  These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
	local_irq_save(flags);
	__call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
	local_irq_restore(flags);

EXPORT_SYMBOL_GPL(call_rcu_bh);
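/*
 * Illustrative reader pairing with call_rcu_bh() (hypothetical names, not
 * part of this file).  A process-context reader uses the _bh primitives:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference(gp);	(gp: some RCU-protected pointer)
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock_bh();
 *
 * Softirq handlers already act as _bh read-side critical sections, which
 * is why this grace period only has to wait until every CPU has finished
 * any softirq handler it was running.
 */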
/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
	return rcu_ctrlblk.completed;
EXPORT_SYMBOL_GPL(rcu_batches_completed);
/*
 * Return the number of RCU bh batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
	return rcu_bh_ctrlblk.completed;
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
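/*
 * Example of how these counters can be used for debugging (illustrative
 * sketch, in the style of a torture test; not part of this file):
 *
 *	long snap = rcu_batches_completed();
 *
 *	... generate some call_rcu() traffic, wait a while ...
 *
 *	if (rcu_batches_completed() - snap == 0)
 *		printk(KERN_WARNING "no RCU batch completed yet\n");
 *
 * The absolute values are meaningless; only the difference between two
 * snapshots says how many grace-period batches completed in between.
 */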
/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
	raise_softirq(RCU_SOFTIRQ);

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
	struct rcu_head *next, *list;

	list = rdp->donelist;
		if (++count >= rdp->blimit)
	rdp->donelist = list;

	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

		rdp->donetail = &rdp->donelist;
/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch to start the next grace
 *   period (if necessary).
 */
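/*
 * In outline (an illustrative sketch of the steps above, not literal code
 * from this file):
 *
 *	rcu_start_batch(rcp):
 *		rcp->cur++;
 *		rcp->cpumask = online cpus (minus tickless idle ones);
 *
 *	on each cpu, from the RCU softirq:
 *	rcu_check_quiescent_state(rcp, rdp):
 *		if (rdp->quiescbatch != rcp->cur)
 *			note the new grace period and wait for a quiescent state;
 *		else if (rdp->passed_quiesc)
 *			cpu_quiet(rdp->cpu, rcp);
 *
 *	cpu_quiet(cpu, rcp):
 *		clear this cpu's bit in rcp->cpumask;
 *		if the mask is now empty:
 *			rcp->completed = rcp->cur;
 *			rcu_start_batch(rcp);	(starts the next period if needed)
 */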
#ifdef CONFIG_DEBUG_RCU_STALL

static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
	rcp->gp_check = get_seconds() + 3;

static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
	/* Only let one CPU complain about others per time interval. */

	spin_lock_irqsave(&rcp->lock, flags);
	delta = get_seconds() - rcp->gp_check;
	if (delta < 2L || cpus_empty(rcp->cpumask)) {
		spin_unlock_irqrestore(&rcp->lock, flags);
		return;
	rcp->gp_check = get_seconds() + 30;
	spin_unlock_irqrestore(&rcp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "RCU detected CPU stalls:");
	for_each_cpu_mask(cpu, rcp->cpumask)
	printk(" (detected by %d, t=%lu/%lu)\n",
	       smp_processor_id(), get_seconds(), rcp->gp_check);
static void print_cpu_stall(struct rcu_ctrlblk *rcp)
	printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu)\n",
			smp_processor_id(), get_seconds(), rcp->gp_check);

	spin_lock_irqsave(&rcp->lock, flags);
	if ((long)(get_seconds() - rcp->gp_check) >= 0L)
		rcp->gp_check = get_seconds() + 30;
	spin_unlock_irqrestore(&rcp->lock, flags);

static void check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
	delta = get_seconds() - rcp->gp_check;
	if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0L) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rcp);

	if (!cpus_empty(rcp->cpumask) && delta >= 2L) {
		/* They had two seconds to dump stack, so complain. */
		print_other_cpu_stall(rcp);
#else /* #ifdef CONFIG_DEBUG_RCU_STALL */

static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)

static inline void
check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)

#endif /* #else #ifdef CONFIG_DEBUG_RCU_STALL */
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
	if (rcp->cur != rcp->pending &&
			rcp->completed == rcp->cur) {
		record_gp_check_time(rcp);
		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier.  Otherwise it can cause tickless idle CPUs to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed ! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If not, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)

	spin_lock_irqsave(&rcp->lock, flags);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock_irqrestore(&rcp->lock, flags);
#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * the locking requirements; the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail, long batch)
		this_rdp->batch = batch;
		*this_rdp->nxttail[2] = list;
		this_rdp->nxttail[2] = tail;

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
	/*
	 * if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_irqsave(&rcp->lock, flags);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
	spin_unlock(&rcp->lock);

	this_rdp->qlen += rdp->qlen;
	local_irq_restore(flags);
static void rcu_offline_cpu(int cpu)
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_offline_cpu(int cpu)

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
		completed_snap = ACCESS_ONCE(rcp->completed);

		/*
		 * move the other grace-period-completed entries to
		 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
		 */
		if (!rcu_batch_before(completed_snap, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
		else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
			rdp->nxttail[0] = rdp->nxttail[1];
		/*
		 * the grace period for entries in
		 * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
		 * move these entries to donelist
		 */
		if (rdp->nxttail[0] != &rdp->nxtlist) {
			*rdp->donetail = rdp->nxtlist;
			rdp->donetail = rdp->nxttail[0];
			rdp->nxtlist = *rdp->nxttail[0];
			*rdp->donetail = NULL;

			if (rdp->nxttail[1] == rdp->nxttail[0])
				rdp->nxttail[1] = &rdp->nxtlist;
			if (rdp->nxttail[2] == rdp->nxttail[0])
				rdp->nxttail[2] = &rdp->nxtlist;
			rdp->nxttail[0] = &rdp->nxtlist;

		if (rcu_batch_after(rdp->batch, rcp->pending)) {
			/* and start it/schedule start if it's a new batch */
			spin_lock_irqsave(&rcp->lock, flags);
			if (rcu_batch_after(rdp->batch, rcp->pending)) {
				rcp->pending = rdp->batch;
				rcu_start_batch(rcp);
			spin_unlock_irqrestore(&rcp->lock, flags);

	rcu_check_quiescent_state(rcp, rdp);

static void rcu_process_callbacks(struct softirq_action *unused)
	/*
	 * Memory references from any prior RCU read-side critical sections
	 * executed by the interrupted code must be seen before any RCU
	 * grace-period manipulations below.
	 */
	smp_mb(); /* See above block comment. */

	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
	/*
	 * Memory references from any later RCU read-side critical sections
	 * executed by the interrupted code must be seen after any RCU
	 * grace-period manipulations above.
	 */
	smp_mb(); /* See above block comment. */

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rcp, rdp);

		long completed_snap = ACCESS_ONCE(rcp->completed);

		/*
		 * This cpu has pending rcu entries and the grace period
		 * for them has completed.
		 */
		if (!rcu_batch_before(completed_snap, rdp->batch))

		if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
				rdp->nxttail[0] != rdp->nxttail[1])

		if (rdp->nxttail[0] != &rdp->nxtlist)
		/*
		 * This cpu has pending rcu entries and the new batch
		 * for them hasn't been started, nor has its start been scheduled.
		 */
		if (rcu_batch_after(rdp->batch, rcp->pending))
	/* This cpu has finished callbacks to invoke */

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
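/*
 * A note on use (illustrative, not code from this file): the dynticks/nohz
 * idle path consults rcu_needs_cpu() before stopping the periodic tick,
 * roughly
 *
 *	if (!rcu_needs_cpu(cpu))
 *		... allow the tick to be stopped ...
 *
 * so a CPU that still has queued callbacks keeps taking scheduler-clock
 * interrupts and rcu_check_callbacks() below keeps getting invoked.
 */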
/*
 * Top-level function driving RCU grace-period detection, normally
 * invoked from the scheduler-clock interrupt.  This function simply
 * increments counters that are read only from softirq by this same
 * CPU, so there are no memory barriers required.
 */
void rcu_check_callbacks(int cpu, int user)
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so count it.
		 *
		 * Also do a memory barrier.  This is needed to handle
		 * the case where writes from a preempt-disable section
		 * of code get reordered into schedule() by this CPU's
		 * write buffer.  The memory barrier makes sure that
		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
		 * by other CPUs to happen after any such write.
		 */
		smp_mb(); /* See above block comment. */
		rcu_bh_qsctr_inc(cpu);

	} else if (!in_softirq()) {
		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * an rcu_bh read-side critical section.  This is a
		 * quiescent state for rcu_bh, so count it.  The memory
		 * barrier is needed for the same reason as the one above.
		 */
		smp_mb(); /* See above block comment. */
		rcu_bh_qsctr_inc(cpu);

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
	spin_lock_irqsave(&rcp->lock, flags);
	memset(rdp, 0, sizeof(*rdp));
	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->blimit = blimit;
	spin_unlock_irqrestore(&rcp->lock, flags);

static void __cpuinit rcu_online_cpu(int cpu)
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
	long cpu = (long)hcpu;

	case CPU_UP_PREPARE_FROZEN:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};
/*
 * Initializes rcu mechanism.  Assumed to be called early.
 * That is before local timer (SMP) or jiffies timer (uniproc) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);