/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>

/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;
static void invoke_rcu_kthread(void);

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static int rcu_kthread(void *arg);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
        if (--rcu_dynticks_nesting == 0)
                rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
        rcu_dynticks_nesting++;
}

#endif /* #ifdef CONFIG_NO_HZ */

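/*
 * Exposition (added for this edition; not in the original file):
 * rcu_dynticks_nesting starts at 1 because the CPU boots running in
 * the kernel, i.e. outside of dynticks-idle mode.  A sketch of the
 * intended call pattern, with a hypothetical idle-loop caller standing
 * in for the tick code:
 *
 *	rcu_enter_nohz();	(nesting 1 -> 0: extended QS begins,
 *				 and a quiescent state is reported)
 *	...			(CPU is dynticks-idle)
 *	rcu_exit_nohz();	(nesting 0 -> 1: extended QS ends)
 *
 * Nested exit/enter pairs only move the counter above zero and back,
 * so only the final decrement to zero marks entry into the extended
 * quiescent state.
 */
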
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also disable irqs to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                local_irq_restore(flags);
                return 1;
        }
        local_irq_restore(flags);

        return 0;
}

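/*
 * Exposition (added for this edition; not in the original file):
 * struct rcu_ctrlblk, defined in rcutiny_plugin.h, is roughly a
 * singly linked callback list with two tail pointers:
 *
 *	struct rcu_ctrlblk {
 *		struct rcu_head *rcucblist;	(list of pending callbacks)
 *		struct rcu_head **donetail;	(->next of last ready-to-invoke CB)
 *		struct rcu_head **curtail;	(->next of last queued CB)
 *	};
 *
 * Callbacks between the list head and *donetail have already waited
 * out a grace period; those between *donetail and *curtail are still
 * waiting.  Advancing ->donetail to ->curtail, as rcu_qsctr_help()
 * does above, therefore marks every currently queued callback as
 * ready to invoke.
 */
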
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting,
 * so that both helpers always run.
 */
void rcu_sched_qs(int cpu)
{
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_kthread();
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_kthread();
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state (user-mode execution, or the idle loop with at most
 * the scheduling-clock interrupt itself in progress), and, if so, tell
 * RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) &&
             !in_softirq() &&
             hardirq_count() <= (1 << HARDIRQ_SHIFT)))
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}

/*
 * Invoke those RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        struct rcu_head *next, *list;
        unsigned long flags;

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail)
                return;

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                list->func(list);
                local_bh_enable();
                list = next;
        }
}

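/*
 * Exposition (added for this edition; not in the original file): the
 * list splice above, pictured for two done callbacks A and B and one
 * still-waiting callback C:
 *
 *	before:	rcucblist -> A -> B -> C -> NULL
 *		donetail == &B->next, curtail == &C->next
 *
 *	after:	list = A -> B -> NULL	(invoked above, with BH disabled)
 *		rcucblist -> C -> NULL
 *		donetail == &rcucblist	(nothing left that is ready)
 */
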
/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that was used previously for this purpose.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
        unsigned long work;
        unsigned long morework;
        unsigned long flags;

        for (;;) {
                wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
                morework = rcu_boost();
                local_irq_save(flags);
                work = have_rcu_kthread_work;
                have_rcu_kthread_work = morework;
                local_irq_restore(flags);
                if (work) {
                        rcu_process_callbacks(&rcu_sched_ctrlblk);
                        rcu_process_callbacks(&rcu_bh_ctrlblk);
                        rcu_preempt_process_callbacks();
                }
                schedule_timeout_interruptible(1); /* Leave CPU for others. */
        }

        return 0;  /* Not reached, but needed to shut gcc up. */
}

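/*
 * Exposition (added for this edition; not in the original file): the
 * flag handoff in rcu_kthread() is the subtle part.  "work" snapshots
 * have_rcu_kthread_work with irqs disabled, and the flag is then
 * overwritten with rcu_boost()'s return value, so the loop keeps
 * running as long as boosting reports more to do.  A wakeup that
 * races in after the snapshot simply sets the flag again and is
 * picked up on the next pass through the loop.
 */
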
/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_kthread(void)
{
        unsigned long flags;

        local_irq_save(flags);
        have_rcu_kthread_work = 1;
        wake_up(&rcu_kthread_wq);
        local_irq_restore(flags);
}

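/*
 * Exposition (added for this edition; not in the original file):
 * wait_event() in rcu_kthread() re-checks have_rcu_kthread_work before
 * sleeping, so setting the flag before calling wake_up() guarantees
 * that no wakeup is lost: the kthread is either awakened here or sees
 * the flag on its next check of the wait condition.
 */
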
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

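/*
 * Usage sketch (added for this edition; not in the original file),
 * showing the classic update-side pattern with a hypothetical
 * RCU-protected global pointer gp:
 *
 *	struct foo *old = gp;
 *
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_sched();	(all pre-existing readers have finished)
 *	kfree(old);		(now safe to free the old version)
 */
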
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

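/*
 * Usage sketch (added for this edition; not in the original file): the
 * asynchronous counterpart of synchronize_sched(), with a hypothetical
 * struct foo embedding its own rcu_head so that the callback can
 * recover the enclosing object:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	call_rcu_sched(&fp->rcu, foo_reclaim);	(fp: the foo being retired)
 */
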
/*
 * On this single-CPU implementation, callbacks are invoked in queue
 * order, so posting one more callback and waiting for it suffices to
 * wait for all callbacks posted earlier.  The rcu_barrier_*() functions
 * below therefore post a callback that signals a completion and then
 * wait on that completion.
 */
void rcu_barrier_bh(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

void rcu_barrier_sched(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
        struct sched_param sp;

        rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
        sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
        return 0;
}
early_initcall(rcu_spawn_kthreads);
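
/*
 * Exposition (added for this edition; not in the original file):
 * because callback invocation happens only in rcu_kthread(), callbacks
 * posted before this early_initcall() runs simply accumulate on their
 * lists and are invoked once the kthread has been spawned.
 */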