/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

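/*
 * Callback-processing tuning knobs (summarizing how the code below uses
 * them; see rcu_do_batch() and __call_rcu()):
 *   blimit   - maximum callbacks invoked per softirq pass.
 *   qhimark  - queue length above which batch limiting is disabled
 *              (blimit = INT_MAX) and a quiescent state is forced.
 *   qlowmark - queue length below which normal batch limiting resumes.
 */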
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        cpumask_t cpumask;
        set_need_resched();
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 *
                 * cpu_online_map is updated by _cpu_down()
                 * using __stop_machine(). Since we're in an irqs-disabled
                 * section, __stop_machine() is not executing, hence
                 * the cpu_online_map is stable.
                 *
                 * However, a cpu might have been offlined _just_ before
                 * we disabled irqs while entering here.
                 * And rcu subsystem might not yet have handled the CPU_DEAD
                 * notification, leading to the offlined cpu's bit
                 * being set in the rcp->cpumask.
                 *
                 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
                 * sending smp_send_reschedule() to an offlined CPU.
                 */
                cpus_and(cpumask, rcp->cpumask, cpu_online_map);
                cpu_clear(rdp->cpu, cpumask);
                for_each_cpu_mask_nr(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif
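/*
 * Note on the above: the reschedule IPI does not report a quiescent state
 * by itself; it merely pushes the target CPU through the scheduler, and the
 * resulting context switch (or tick taken from user/idle context) is what
 * gets counted as the quiescent state.
 */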

static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
                                struct rcu_data *rdp)
{
        long batch;
        smp_mb(); /* reads the most recently updated value of rcp->cur. */

        /*
         * Determine the batch number of this callback.
         *
         * Using ACCESS_ONCE to avoid the following error when gcc eliminates
         * local variable "batch" and emits code like this:
         * 1) rdp->batch = rcp->cur + 1 # gets old value
         * ......
         * 2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
         * then [*nxttail[0], *nxttail[1]) may contain callbacks
         * whose batch# == rdp->batch; see the comment in struct rcu_data.
         */
        batch = ACCESS_ONCE(rcp->cur) + 1;

        if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
                /* process callbacks */
                rdp->nxttail[0] = rdp->nxttail[1];
                rdp->nxttail[1] = rdp->nxttail[2];
                if (rcu_batch_after(batch - 1, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[2];
        }

        rdp->batch = batch;
        *rdp->nxttail[2] = head;
        rdp->nxttail[2] = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
}
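/*
 * For reference, the segmented per-CPU queue that __call_rcu() maintains
 * (this summarizes the layout documented with struct rcu_data):
 *
 *   [nxtlist, *nxttail[0])      entries with batch# <  rdp->batch - 1
 *   [*nxttail[0], *nxttail[1])  entries with batch# == rdp->batch - 1
 *   [*nxttail[1], *nxttail[2])  entries with batch# == rdp->batch
 *
 * New callbacks are appended at *nxttail[2]; __rcu_process_callbacks()
 * later splices the completed leading segment onto donelist.
 */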

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
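#if 0
/*
 * Illustrative sketch only: a typical call_rcu() update-side pattern.
 * "struct foo", foo_reclaim() and foo_delete() are hypothetical names used
 * purely for this example; they are not part of this file.
 */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
        struct foo *fp = container_of(rcu, struct foo, rcu);

        kfree(fp);      /* safe: all pre-existing readers have finished */
}

static void foo_delete(struct foo *fp)
{
        /* First unlink fp from every RCU-visible path, then: */
        call_rcu(&fp->rcu, foo_reclaim);
}
#endif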

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context.  These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
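#if 0
/*
 * Illustrative sketch only: a matching read side for callbacks queued with
 * call_rcu_bh().  "struct foo", its ->key/->data/->list members and the
 * global "foo_list" are hypothetical names for this example.
 */
static int foo_lookup(int key, int *val)
{
        struct foo *fp;
        int found = 0;

        rcu_read_lock_bh();
        list_for_each_entry_rcu(fp, &foo_list, list) {
                if (fp->key == key) {
                        *val = fp->data;   /* copy out while still protected */
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock_bh();
        return found;
}
#endif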

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU bottom-half (rcu_bh) batches processed thus far.
 * Useful for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
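#if 0
/*
 * Illustrative sketch only: the counters above are handy for coarse
 * statistics, e.g. counting grace periods across a hypothetical workload.
 */
static void foo_measure_grace_periods(void)
{
        long before = rcu_batches_completed();

        do_something();         /* hypothetical workload */
        printk(KERN_DEBUG "grace periods elapsed: %ld\n",
               rcu_batches_completed() - before);
}
#endif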

/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
        raise_softirq(RCU_SOFTIRQ);
}

/*
 * Invoke the completed RCU callbacks.  They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_disable();
        rdp->qlen -= count;
        local_irq_enable();
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state then calls rcu_start_batch (via cpu_quiet)
 *   to start the next grace period (if necessary).
 */
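/*
 * A sketch of the resulting sequence for one grace period, derived from the
 * functions below:
 *
 *   rcu_start_batch():          rcp->cur++, cpumask = online (non-nohz) cpus
 *   each cpu, at some tick:     notices rdp->quiescbatch != rcp->cur,
 *                               sets qs_pending and waits for a quiescent state
 *   each cpu, after quiescing:  cpu_quiet() clears its bit in rcp->cpumask
 *   last cpu to quiesce:        rcp->completed = rcp->cur, and the next
 *                               pending batch (if any) is started
 */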
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->cur != rcp->pending &&
                        rcp->completed == rcp->cur) {
                rcp->cur++;

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs
                 * a barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch).  If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock(&rcp->lock);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock(&rcp->lock);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Warning! Helper for rcu_offline_cpu(); do not use elsewhere without
 * reviewing the locking requirements: the list it's pulling from has to
 * belong to a cpu which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail, long batch)
{
        if (list) {
                local_irq_disable();
                this_rdp->batch = batch;
                *this_rdp->nxttail[2] = list;
                this_rdp->nxttail[2] = tail;
                local_irq_enable();
        }
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /*
         * If the cpu going offline owns the grace period, we can block
         * indefinitely waiting for it, so flush it here.
         */
        spin_lock_bh(&rcp->lock);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        spin_unlock_bh(&rcp->lock);
        /* spin_lock implies smp_mb() */
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);

        local_irq_disable();
        this_rdp->qlen += rdp->qlen;
        local_irq_enable();
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                          &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                          &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->nxtlist) {
                local_irq_disable();

                /*
                 * Move the other grace-period-completed entries to
                 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily.
                 */
                if (!rcu_batch_before(rcp->completed, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
                else if (!rcu_batch_before(rcp->completed, rdp->batch - 1))
                        rdp->nxttail[0] = rdp->nxttail[1];

                /*
                 * The grace period for the entries in
                 * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
                 * move these entries to donelist.
                 */
                if (rdp->nxttail[0] != &rdp->nxtlist) {
                        *rdp->donetail = rdp->nxtlist;
                        rdp->donetail = rdp->nxttail[0];
                        rdp->nxtlist = *rdp->nxttail[0];
                        *rdp->donetail = NULL;

                        if (rdp->nxttail[1] == rdp->nxttail[0])
                                rdp->nxttail[1] = &rdp->nxtlist;
                        if (rdp->nxttail[2] == rdp->nxttail[0])
                                rdp->nxttail[2] = &rdp->nxtlist;
                        rdp->nxttail[0] = &rdp->nxtlist;
                }

                local_irq_enable();

                if (rcu_batch_after(rdp->batch, rcp->pending)) {
                        /* and start it, or schedule its start, if it's a new batch */
                        spin_lock(&rcp->lock);
                        if (rcu_batch_after(rdp->batch, rcp->pending)) {
                                rcp->pending = rdp->batch;
                                rcu_start_batch(rcp);
                        }
                        spin_unlock(&rcp->lock);
                }
        }

        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        if (rdp->nxtlist) {
                /*
                 * This cpu has pending rcu entries and the grace period
                 * for them has completed.
                 */
                if (!rcu_batch_before(rcp->completed, rdp->batch))
                        return 1;
                if (!rcu_batch_before(rcp->completed, rdp->batch - 1) &&
                                rdp->nxttail[0] != rdp->nxttail[1])
                        return 1;
                if (rdp->nxttail[0] != &rdp->nxtlist)
                        return 1;

                /*
                 * This cpu has pending rcu entries and the new batch
                 * for them hasn't been started, nor has its start been
                 * scheduled.
                 */
                if (rcu_batch_after(rdp->batch, rcp->pending))
                        return 1;
        }

        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
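#if 0
/*
 * Illustrative sketch only: how the scheduler-tick path is expected to pair
 * rcu_pending() with rcu_check_callbacks() (cf. update_process_times();
 * the hook name below is hypothetical and used purely for the example).
 */
static void timer_tick_hook(int cpu, int user_tick)
{
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
}
#endif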

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

        return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}

void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

                /*
                 * Get here if this CPU took its interrupt from user
                 * mode or from the idle loop, and if this is not a
                 * nested interrupt.  In this case, the CPU is in
                 * a quiescent state, so count it.
                 *
                 * Also do a memory barrier.  This is needed to handle
                 * the case where writes from a preempt-disable section
                 * of code get reordered into schedule() by this CPU's
                 * write buffer.  The memory barrier makes sure that
                 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
                 * by other CPUs to happen after any such write.
                 */

                smp_mb();  /* See above block comment. */
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);

        } else if (!in_softirq()) {

                /*
                 * Get here if this CPU did not take its interrupt from
                 * softirq, in other words, if it is not interrupting
                 * an rcu_bh read-side critical section.  This is therefore
                 * a quiescent state for rcu_bh, so count it.  The memory
                 * barrier is needed for the same reason as is the above one.
                 */

                smp_mb();  /* See above block comment. */
                rcu_bh_qsctr_inc(cpu);
        }
        raise_rcu_softirq();
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        memset(rdp, 0, sizeof(*rdp));
        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
}

static void __cpuinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes rcu mechanism.  Assumed to be called early.
 * That is, before the local timer (SMP) or the jiffies timer (uniprocessor)
 * is set up.  Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);